From: ribes Date: Tue, 30 Oct 2007 15:10:02 +0000 (+0000) Subject: - Kernel Documentation to doxygen X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=d23c853f67b7ec36b28dc53fc0abf6497a228d87;p=modules%2Fkernel.git - Kernel Documentation to doxygen --- diff --git a/doc/Makefile.am b/doc/Makefile.am index c504425be..cfa250da1 100644 --- a/doc/Makefile.am +++ b/doc/Makefile.am @@ -30,13 +30,13 @@ SUBDIRS = salome doc_DATA = if RST2HTML_IS_OK - doc_DATA += \ - index.html \ - UnitTests.html \ - SALOME_Application.html \ - INSTALL.html \ - kernel_resources.html \ - KERNEL_Services.html +# doc_DATA += \ +# index.html \ +# UnitTests.html \ +# SALOME_Application.html \ +# INSTALL.html \ +# kernel_resources.html \ +# KERNEL_Services.html endif EXTRA_DIST=$(doc_DATA) @@ -55,4 +55,4 @@ usr_docs: (cd salome && $(MAKE) $(AM_MAKEFLAGS) usr_docs) dev_docs: - (cd salome && $(MAKE) $(AM_MAKEFLAGS) dev_docs) \ No newline at end of file + (cd salome && $(MAKE) $(AM_MAKEFLAGS) dev_docs) diff --git a/doc/salome/Makefile.am b/doc/salome/Makefile.am index b1075e0b8..3a010768b 100644 --- a/doc/salome/Makefile.am +++ b/doc/salome/Makefile.am @@ -49,3 +49,8 @@ info_TEXINFOS = Batch.texi install-data-local: html usr_docs cp -rp $(top_builddir)/doc/salome/Batch.html $(docdir) + +EXTRA_DIST= main.dox batch.dox install.dox \ + kernel_resources.dox kernel_services.dox \ + salome_application.dox unittests.dox + diff --git a/doc/salome/install.dox b/doc/salome/install.dox new file mode 100644 index 000000000..8090ff92f --- /dev/null +++ b/doc/salome/install.dox @@ -0,0 +1,378 @@ +/*! + \page INSTALL Installation instructions + +NOT UP TO DATE %SALOME 4 +WORK in PROGRESS, INCOMPLETE DOCUMENT + +You'll find here generic instructions for installing the SALOME2 platform. + +\section Summary + +
1. \ref S1_install
2. \ref S2_install
3. \ref S3_install
4. \ref S4_install
5. \ref S5_install
6. \ref S6_install
7. \ref S7_install
8. \ref S8_install
+ +\section S1_install Quick Overview + +First of all, you have to check (or install if needed) the dependant +software programs on your system. These programs are: + +- common development tools as gcc, automake, autoconf and libtools. +- third party softwares used in SALOME building or runtime process + (python, OCC, VTK, ...) + +Further details can be found in sections [2] and [3]. + +If the dependencies are installed on your system, then you have to set +your shell environment to get access to the software components +(cf. [4]. "Preparing the shell environment"). + +The next step is to install the KERNEL (cf. [5] "Installing KERNEL"): + +\code +$ mkdir +$ mkdir +$ cd +$ ./build_configure +$ cd +$ /configure --prefix= +$ make +$ make install +\endcode + +Then, the %SALOME components GEOM, MED, VISU, ... can be installed +with a similar procedure (cf. [6]). + +Eventually, the platform ccodean be run by executing the shell script +runSalome (cf. [7]). Here, somme additionnal variables have to be set +to describe the %SALOME runtime configuration (_ROOT_DIR, +OMNIORB_CONFIG) + +The following provides you with specific instructions for each step. + + +\section S2_install System configuration + +%SALOME is compiled and tested on differents platforms with native packages: +- Debian sarge +- Mandrake 10.1 +- ... + +If you have another platform, we suggest the following configuration +for building process: + +- gcc-3.3.x or 3.4.x +- automake-1.7 or more (only aclocal is used) +- autoconf-2.59 +- libtool-1.5.6 + +remarks: + +- This is the minimum level of automake, autoconf and libtool, if you need + to compile all the third party softwares (included OpenCascade 5.2.x). + +\section S3_install Third-party dependencies + +The %SALOME platform relies on a set of third-party softwares. The +current version depends on the following list +(versions given here are from Debian Sarge, except OpenCascade, VTK and MED, +which are not Debian packages): + +- CAS-5.2.4 OpenCascade (try binaries,a source patch is needed) +- VTK-4.2.6 VTK 3D-viewer +- PyQt-3.13 Python-Qt Wrapper +- Python-2.3.5 Python interpreter +- SWIG-1.3.24 SWIG library +- boost-1_32_0 C++ library (only include templates are used) +- hdf5-1.6.2 Files Database library +- med-2.2.2 MED Data Format support for file records +- omniORB-4.0.5 ORB used in %SALOME +- qt-x11-free-3.3.3 Qt library +- qwt-4.2 Graph components for Qt +- sip4-4.1.1 langage binding software + +And, in order to build the documentation: + +- doxygen-1.4.2 +- graphviz-2.2.1 + + +Additionnal software may be installed for optional features: + +- netgen4.3 + patch +- tix8.1.4 +- openpbs-2.3.16 +- lsf-??? + +To Do + +- Instructions for installing these software programs can be found in a + special note doc/configuration_examples/install-prerequis. +- Installation shell scripts are also provided. + These scripts have to be adapted to your own configuration. + +- See doc/configuration_examples/* + +In the following, we assume that all the third-party softwares are +installed in the same root directory, named /prerequis. +Then, your file system should probably look like:: + +\code +/prerequis/Python-2.2.2 +/prerequis/omniORB-3.0.5 +/prerequis/qt-x11-free-3.0.5 +... +\endcode + +\section S4_install Preparing the shell environment + +Some variables have to be set to get acces to third-party software +components (include files, executable, library, ...) during building +process and runtime. 
+ +The shell file prerequis.sh, embedded in the KERNEL source package, +provides a template for setting those variables. In this example, all the +softwares are supposed to be installed in the same root directory, +named here INSTALLROOT. + +Copy the prerequis.sh in a working directory and adjust the settings +to your own configuration. To get the shell prepared, just +execute the following command in the building shell: + +\code +$ source prerequis.sh +\endcode + +(we assume here a ksh or bash mode) + + +\section S5_install Installing the KERNEL component + +We use here the notation to specify the source directory +of the KERNEL component. The shell environment is supposed to have +been set (cf. 4). + +Installing the KERNEL from a source package needs three directories: + +- the source directory, denoted here by . + +- the build directory, denoted by in the following. This + directory can't be the same directory as . + +- the install directory, denoted by in the following. This + directory can't be the same directory as or + . + +The installing process is: + +STEP 1: + preparing directories + + create the and the directories: + + \code +$ mkdir +$ mkdir +\endcode + +STEP 2: + build configure script + + go to directory and generate the "configure" script: + + \code +$ cd +$ ./build_configure + \endcode + + If it doesn't work, check your system automake tools as specified in + section [2]. + +STEP 3: + configure the building process + go to the build directory and execute the configuration process:: + + \code +$ cd +$ /configure --prefix= + \endcode + + Note that must be an absolute path. + + When the configure process is complete, check the status of + third-party softwares detection. You should have a status like:: + + \code + --------------------------------------------- + Summary + --------------------------------------------- + Configure + cc : yes + boost : yes + lex_yacc : yes + python : yes + swig : yes + threads : yes + OpenGL : yes + qt : yes + vtk : yes + hdf5 : yes + med2 : yes + omniORB : yes + occ : yes + sip : yes + pyqt : yes + qwt : yes + doxygen : yes + graphviz : no + openpbs : no + lsf : no + Default ORB : omniORB + ---------------------------------------------- + \endcode + +If a software get a status "no", then it's not "seen" in the system: + +- the software is not installed, or +- the shell environment is not set correctly. + +In this example, the software programs graphviz, openpbs and lsf are not +installed (optional for most usages). + + +STEP 4 : + Building the binary files + + Execute make in the directory:: + + \code +$ make + \endcode + +STEP 5: + Installing binary files, scripts and documentation + + Execute install target in the directory:: + + \code +$ make install + \endcode + +\section S6_install Installing the SALOME components + +TInstalling a component is done by following the same +instructions as given for the KERNEL, replacing KERNEL by + (build_configure, configure, make, make install). + +You just have to be aware of the dependencies between components: + +- MED depends on KERNEL +- GEOM depends on KERNEL +- SMESH depends on KERNEL, MED, GEOM +- VISU depends on KERNEL, MED +- SUPERV depends on KERNEL + +For example, installing the component SMESH needs the previous +installation of the KERNEL component, and then the GEOM and MED components. + +The building process uses the variables _ROOT_DIR to +localize the dependant components. The variables must be set to the +install path directory of the components (ex: +KERNEL_ROOT_DIR=). 
+ +In the above example, the three variables KERNEL_ROOT_DIR, +GEOM_ROOT_DIR and MED_ROOT_DIR have to be set before configuring the +building process of the SMESH component (STEP 3). + + +\section S7_install Runtime + +See SALOME_Application to define your own configuration of %SALOME and run it +on one or several computers. This is the recommended way of configuration. + +The following explains the general principles. + +To run the %SALOME platform, the procedure is: + +- set the shell environment to get acces to third-party softwares: + +\code +$ source prerequis.sh +\endcode + +- define the %SALOME configuration by setting the whole set of + variables _ROOT_DIR. Here, you just have to set the + kernel and the components you need:: + + \code +$ export KERNEL_ROOT_DIR= +$ export MED_ROOT_DIR= +$ ... + \endcode + +- define the CORBA configuration file by setting the variable + OMNIORB_CONFIG. This variable must be set to a writable file + path. The file may be arbitrary chosen and doesn't need to exist + before running. We suggest:: + + \code +$ export OMNIORB_CONFIG=$HOME/.omniORB.cfg + \endcode + +- run the %SALOME platform by executing the script runSalome: + + \code +$KERNEL_ROOT_DIR/bin/salome/runSalome + \endcode + +\section S8_install Suggestions and advices + +For convenience or customization, we suggest the following organisation: + +- chose and create a root directory for the %SALOME platform, say + . + +- install the third-party softwares in a sub-directory "prerequis" + +- install the %SALOME components in a sub-directory "SALOME2" + +- make personnal copies of the files prerequis.sh and runSalome in + : + + \code +$ cp /prerequis.sh /. +$ cp /bin/salome/runSalome /. + \endcode + + Edit the file prerequis.sh and adjust it to your own configuration. + +- define the SALOME2 configuration + + This step consists in setting the KERNEL_ROOT_DIR, the whole set of + variables _ROOT_DIR you need, and the OMNIORB_CONFIG + variable. + + We suggest to create a shell file envSalome.sh containing those + settings. Then the configuration consists in loading envSalome.sh in + the runtime shell: + +\code +$ source envSalome.sh +\endcode + +- When installed with this file organisation, running %SALOME is done + with the following shell commands:: + + \code + $ source /prerequis.sh + $ source /envSalome.sh + $ ./runSalome + \endcode +*/ diff --git a/doc/salome/kernel_resources.dox b/doc/salome/kernel_resources.dox new file mode 100644 index 000000000..6401e942b --- /dev/null +++ b/doc/salome/kernel_resources.dox @@ -0,0 +1,559 @@ +/*! + +\page kernel_resources SALOME Kernel resources for developer + +WORK in PROGRESS, INCOMPLETE DOCUMENT + + +\section S1_kernel_res Abstract + +This document describes the development environment for +C++ and Python. Makefiles generation and usage are +introduced in another document: "using the %SALOME +configuration and building system environment". +Development environment is intended here as: trace and +debug macros usage; %SALOME exceptions usage, in C++ and +Python; user CORBA exceptions usage, in C++ and Python, +with and without Graphical User Interface; some general +purpose services such as singleton, used for CORBA +connection and disconnection. + +\section S2_kernel_res Trace and debug Utilities + +During the development process, an execution log is +useful to identify problems. This log contains +messages, variables values, source files names and line +numbers. 
It is recommended to verify assertions on variable values and, if necessary, to stop the execution at debug time, in order to validate all parts of the code.
  1. +Two modes: debug and release + +The goal of debug mode is to check as many features as +possible during the early stages of the development +process. The purpose of the utilities provided in +%SALOME is to help the developer to add detailed traces +and check variables values, without writing a lot of code. + +When the code is assumed to be valid, the release mode +optimizes execution, in terms of speed, memory, and +display only user level messages. + +But, some informations must always be displayed in both +modes: especially messages concerning environment or +internal errors, with version identification. When an +end user is confronted to such a message, he may refer +to a configuration documentation or send the message to +the people in charge of %SALOME installation, or to the +development team, following the kind of error. +
  3. +C++ Macros for trace and debug + +%SALOME provides C++ macros for trace and debug. These +macros are in: + +\code +KERNEL_SRC/src/SALOMELocalTrace/utilities.h +\endcode + +This file must be included in C++ source. Some +macros are activated only in debug mode, others are +always activated. To activate the debug mode, ``_DEBUG_`` +must be defined, which is the case when %SALOME +Makefiles are generated from configure, without +options. When ``_DEBUG_`` is undefined (release mode: +``configure --disable-debug --enable-production``), the +debug mode macros are defined empty (they do nothing). +So, when switching from debug to release, it is +possible (and recommended) to let the macro calls +unchanged in the source. + +All the macros generate trace messages, stored in a +circular buffer pool. %A separate %thread reads the +messages in the buffer pool, and, depending on options +given at %SALOME start, writes the messages on the +standard output, a file, or send them via CORBA, in +case of a multi machine configuration. + +Three informations are systematically added in front of +the information displayed: + +- the %thread number from which the message come from; + +- the name of the source file in which the macros is set; + +- the line number of the source file at which the macro + is set. + +
    1. +Macros defined in debug and release modes +\n +INFOS_COMPILATION + + The C++ macro INFOS_COMPILATION writes on the trace + buffer pool informations about the compiling process: + + - the name of the compiler : g++, KCC, CC, pgCC; + + - the date and the time of the compiling processing process. + + This macro INFOS_COMPILATION does not have any + argument. Moreover, it is defined in both compiling + mode : _DEBUG_ and _RELEASE_. + + Example: + + \code +#include "utilities.h" +int main(int argc , char **argv) +{ + INFOS_COMPILATION; + ... +} +INFOS(str) + \endcode +\n +INFOS + + In both compiling mode _DEBUG_ and _RELEASE_, The C++ + macro INFOS writes on the trace buffer pool %the string + which has been passed in argument by the user. + + Example: + + \code +#include "utilities.h" +int main(int argc , char **argv) +{ + ... + INFOS("NORMAL END OF THE PROCESS"); + return 0; +} + \endcode + + Displays: + + \code +main.cxx [5] : NORMAL END OF THE PROCESS + \endcode +\n +INTERRUPTION(str) + + In both compiling mode _DEBUG_ and _RELEASE_, The C++ + macro INTERRUPTION writes on the trace buffer pool the + %string, with a special ABORT type. When the %thread in + charge of collecting messages finds this message, it + terminates the application, after message treatment. + +IMMEDIATE_ABORT(str) + + In both compiling mode _DEBUG_ and _RELEASE_, The C++ + macro IMMEDIATE_ABORT writes the message str immediately on + standard error and exits the application. Remaining + messages not treated by the message collector %thread + are lost. + +
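INTERRUPTION and IMMEDIATE_ABORT have no example on this page; the following is a minimal sketch of how they could be called, based only on the descriptions above (the failing conditions and messages are invented for the illustration):

\code
#include "utilities.h"

int main(int argc, char **argv)
{
  INFOS_COMPILATION;
  if (argc < 2)
    // the message goes through the trace buffer pool; the collector
    // thread terminates the application after treating it
    INTERRUPTION("missing argument, stopping after message treatment");
  if (argv[1][0] == '\0')
    // written immediately on standard error, then the application exits;
    // messages still waiting in the buffer pool are lost
    IMMEDIATE_ABORT("empty argument, exiting at once");
  INFOS("arguments accepted");
  return 0;
}
\endcode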
    3. +Macros defined only in debug mode +\n +MESSAGE(str) + + In _DEBUG_ compiling mode only, the C++ macro MESSAGE + writes on the trace buffer pool the %string which has + been passed in argument by the user. In _RELEASE_ + compiling mode, this macro is blank. + + Example: + + \code +#include "utilities.h" +#include + +using namespace std; + +int main(int argc , char **argv) +{ + ... + const char *str = "Salome"; + MESSAGE(str); + ... const string st; + st = "Aster"; + MESSAGE(c_str(st+" and CASTEM")); + return 0; +} + + \endcode + + Displays: + + \code +- Trace main.cxx [8] : Salome +- Trace main.cxx [12] : Aster and CASTEM + \endcode + +\n +BEGIN_OF(func_name) + + In _DEBUG_ compiling mode, The C++ macro BEGIN_OF + appends the %string "Begin of " to the one passed in + argument by the user and displays the result on the + trace buffer pool. In _RELEASE_ compiling mode, this + macro is blank. + + Example: + + \code +#include "utilities.h" +int main(int argc , char **argv) +{ + BEGIN_OF(argv[0]); + return 0; +} + \endcode + + Displays: + + \code + - Trace main.cxx [3] : Begin of a.out + \endcode +\n +END_OF(func_name) + + In _DEBUG_ compiling mode, The C++ macro END_OF appends + the %string "Normal end of " to the one passed in + argument by the user and displays the result on the + trace buffer pool. In _RELEASE_ compiling mode, this + macro is blank. + + Example: + + \code +#include "utilities.h" +int main(int argc , char **argv) +{ + END_OF(argv[0]); + return 0; +} + \endcode + + Displays: + + \code +- Trace main.cxx [4] : Normal end of a.out + \endcode +\n +SCRUTE(var) + + In _DEBUG_ compiling mode, The C++ macro SCRUTE + displays its argument which is an application variable + followed by the value of the variable. In _RELEASE_ + compiling mode, this macro is blank. + + Example: + + \code +#include "utilities.h" +int main(int argc , char **argv) +{ + const int i=999; + if( i > 0 ) SCRUTE(i) ; i=i+1; + return 0; +} + \endcode + + Displays: + + \code +- Trace main.cxx [5] : i=999 + \endcode +\n +ASSERT(condition) + + In _DEBUG_ compiling mode only, The C++ macro ASSERT + checks the expression passed in argument to be not + NULL. If it is NULL the condition is written with the + macro INTERRUPTION (see above). The process exits after + trace of this last message. In _RELEASE_ compiling + mode, this macro is blank. N.B. : if ASSERT is already + defined, this macro is ignored. + + Example: + + \code +#include "utilities.h" +... +const char *ptrS = fonc(); +ASSERT(ptrS!=NULL); +cout << strlen(ptrS); +float table[10]; +int k; +... +ASSERT(k<10); +cout << table[k]; + \endcode + +
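The debug-mode macros above are typically combined to instrument a whole function; an illustrative sketch (the function and its variables are invented for the example, and all these macros compile to nothing in release mode):

\code
#include "utilities.h"

// hypothetical helper instrumented with the debug-mode macros
double computeMean(const double *values, int count)
{
  BEGIN_OF("computeMean");     // - Trace ... : Begin of computeMean
  ASSERT(values != 0);         // stops the process (debug mode only) if the pointer is null
  SCRUTE(count);               // - Trace ... : count=<value>

  double sum = 0.;
  for (int i = 0; i < count; i++)
    sum += values[i];
  double mean = (count > 0) ? sum / count : 0.;

  SCRUTE(mean);
  END_OF("computeMean");       // - Trace ... : Normal end of computeMean
  return mean;
}
\endcode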
+ +\section S3_kernel_res Exceptions + +
  1. +C++ exceptions: class SALOME_Exception + +
    1. +definition + +The class SALOME_Exception provides a generic method to +send a message, with optional source file name and line +number. This class is intended to serve as a base class +for all kinds of exceptions %SALOME code. All the +exceptions derived from SALOME_Exception could be +handled in a single catch, in which the message +associated to the exception is displayed, or sent to a +log file. + +The class SALOME_Exception inherits its behavior from +the STL class exception. +
    3. +usage + +The header %SALOME/src/utils/utils_SALOME_Exception.hxx +must be included in the C++ source, when raised or trapped: + +\code +#include "utils_SALOME_Exception.hxx" +\endcode + +The SALOME_Exception constructor is: + +\code +SALOME_Exception( const char *text, + const char *fileName=0, + const unsigned int lineNumber=0 ); +\endcode + +The exception is raised like this: + +\code +throw SALOME_Exception("my pertinent message"); +\endcode + +or like this: + +\code +throw SALOME_Exception(LOCALIZED("my pertinent message")); +\endcode + +where LOCALIZED is a macro provided with +``utils_SALOME_Exception.hxx`` which gives file name and +line number. + +The exception is handled like this: + +\code + try +{ + ... +} +catch (const SALOME_Exception &ex) +{ + cerr << ex.what() < +
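Because SALOME_Exception is designed as a base class, module-specific exceptions can be derived from it and still be handled through a single catch. A minimal sketch (the derived class and the function are invented for the example):

\code
#include "utils_SALOME_Exception.hxx"
#include <iostream>

// hypothetical module-specific exception reusing the base class message handling
class MyModuleException : public SALOME_Exception
{
public:
  MyModuleException(const char *text) : SALOME_Exception(text) {}
};

void doSomething(bool ok)
{
  if (!ok)
    throw MyModuleException("something went wrong in MyModule");
}

int main()
{
  try
  {
    doSomething(false);
  }
  catch (const SALOME_Exception &ex)  // one catch for the base class handles every derived exception
  {
    std::cerr << ex.what() << std::endl;
  }
  return 0;
}
\endcode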
  3. +CORBA exceptions + +
    1. +definition + +The idl SALOME_Exception provides a generic CORBA +exception for %SALOME, with an attribute that gives an +exception type,a message, plus optional source file +name and line number. + +This idl is intended to serve for all user CORBA +exceptions raised in %SALOME code, as IDL specification +does not support exception inheritance. So, all the +user CORBA exceptions from %SALOME could be handled in a +single catch. + +The exception types defined in idl are: + + - COMM CORBA communication problem, + + - BAD_PARAM Bad User parameters, + + - INTERNAL_ERROR application level problem (often irrecoverable). + +CORBA system and user exceptions already defined in the +packages used within %SALOME, such as OmniORB +exceptions, must be handled separately. + +
    3. +usage +
      1. +CORBA servant, C++ + + The CORBA Server header for SALOME_Exception and a + macro to throw the exception are provided with the + header ``KERNEL_SRC/src/Utils/Utils_CorbaException.hxx``: + + \code +#include "Utils_CorbaException.hxx" + \endcode + + The exception is raised with a macro which appends file + name and line number: + + \code +if (myStudyName.size() == 0) + THROW_SALOME_CORBA_EXCEPTION("No Study Name given", + SALOME::BAD_PARAM); + \endcode + +
      3. +CORBA Client, GUI Qt C++ + + NO MORE AVAILABLE in %SALOME 3.x + + The CORBA Client header for SALOME_Exception and a Qt + function header that displays a message box are + provided in: + + ``KERNEL_SRC/src/SALOMEGUI/SALOMEGUI_QtCatchCorbaException.hxx`` + + \code +#include "SALOMEGUI_QtCatchCorbaException.hxx" + \endcode + + %A typical exchange with a CORBA Servant will be: + + \code +try +{ + ... // one ore more CORBA calls +} + +catch (const SALOME::SALOME_Exception & S_ex) +{ + QtCatchCorbaException(S_ex); +} + \endcode + +
      5. +CORBA Client, C++, without GUI + + Nothing specific has been provided to the developer + yet. See the idl or the Qt function + SALOMEGUI_QtCatchCorbaException.hxx to see how to get + the information given by the exception %object. + +
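Since nothing ready-made is provided for this case, a hand-written handler could look like the sketch below. It assumes the fields declared in the idl exception (an ExceptionStruct member named details, carrying type, text, sourceFile and lineNumber); check them against SALOME_Exception.idl before use.

\code
#include <iostream>
#include <SALOMEconfig.h>
#include CORBA_CLIENT_HEADER(SALOME_Exception)  // stub generated from SALOME_Exception.idl

// Prints the information carried by a SALOME user CORBA exception.
void reportSalomeException(const SALOME::SALOME_Exception &S_ex)
{
  const SALOME::ExceptionStruct &es = S_ex.details;
  std::cerr << "SALOME exception, type " << (int)es.type
            << ": " << (const char *)es.text
            << " (" << (const char *)es.sourceFile
            << ":" << es.lineNumber << ")" << std::endl;
}

// typical use around one or more CORBA calls:
//   try { ... }
//   catch (const SALOME::SALOME_Exception &S_ex) { reportSalomeException(S_ex); }
\endcode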
+ +\section S4_kernel_res Miscellaneous tools + +
  1. +Singleton +
    1. +Definition + +%A singleton is an application data which is created and +deleted only once at the end of the application +process. The C++ compiler allows the user to create a +static singleton data before the first executable +statement. They are deleted after the last statement execution. + +The ``SINGLETON_`` template class deals with dynamic +singleton. It is useful for functor objects. For +example, an %object that connects the application to a +system at creation and disconnects the application at deletion. + +
    3. +Usage + +To create a single instance of a POINT %object: + +\code +# include "Utils_SINGLETON.hxx" +... +POINT *ptrPoint=SINGLETON_::Instance() ; +assert(ptrPoint!=NULL) ; +\endcode + +No need to delete ptrPoint. Deletion is achieved +automatically at exit. If the user tries to create more +than one singleton by using the class method +SINGLETON_::Instance(), the pointer is returned +with the same value even if this is done in different +functions (threads ?): + +\code +POINT *p1=SINGLETON_::Instance() ; +... +POINT *p2=SINGLETON_::Instance() ; + +assert(p1==p2) +\endcode + +
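A minimal sketch with the template parameter written out, using an invented connection class to illustrate the "connect at creation, disconnect at deletion" use case mentioned above:

\code
#include "Utils_SINGLETON.hxx"

// hypothetical functor object: connects when created,
// disconnects when deleted at the end of the application
class Connection
{
public:
  Connection()  { /* connect to the external system here */ }
  ~Connection() { /* disconnect here */ }
  void send(const char * /*msg*/) { /* ... */ }
};

void useConnection()
{
  // the template parameter is the class of the single object
  Connection *c1 = SINGLETON_<Connection>::Instance();
  Connection *c2 = SINGLETON_<Connection>::Instance();
  // c1 == c2 : the same instance is returned on every call;
  // it is deleted automatically at application exit, no delete is needed
  c1->send("hello");
}
\endcode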
    5. +Design description + +Here are the principles features of the singleton +design: + +- the user creates an %object of class TYPE by using the + class method ``SINGLETON_::Instance()`` which + returns a pointer to the single %object ; + +- to create an %object, ``SINGLETON_::Instance()`` + uses the default constructor of class TYPE ; + +- at the same time, this class method creates a + destructor %object which is added to the generic list + of destructor objects to be executed at the end of + the application (atexit) ; + +- at the end of the application process all the + deletions are performed by the ``Nettoyage()`` C function + which executes the destruction objects end then + deletes the destructions objects themselves ; + +- the ``Nettoyage()`` C function using ``atexit()`` C function + is embedded in a static single %object ``ATEXIT_()``. + +
+ +*/ diff --git a/doc/salome/kernel_services.dox b/doc/salome/kernel_services.dox new file mode 100644 index 000000000..d363c84cf --- /dev/null +++ b/doc/salome/kernel_services.dox @@ -0,0 +1,236 @@ +/*! + \page KERNEL_Services KERNEL Services for end user (Python interface) + +WORK in PROGRESS, INCOMPLETE DOCUMENT + +In a %SALOME application, distributed components, servers and clients use +the CORBA middleware for comunication. CORBA interfaces are defined via idl +files. All the different CORBA interfaces are available for users in Python, +see CORBA interfaces below. + +For some general purpose services, CORBA interfaces have been encapsulated +in order to provide a simple interface (encapsulation is generally done in +C++ classes, and a Python SWIG interface is also generated from C++, to +ensure a consistent behavior between C++ modules and Python modules or user +script). + +\section S1_kernel_ser General purpose services + +
  1. +%SALOME services access from a Python shell + +See \ref SALOME_Application for detailed instructions to launch a Python +interpreter with full acces to the %SALOME environment and services. + +You can use the embedded Python interpreter in Grahic User Interface, or an +external interpreter, with: + +\code +./runSession +python +\endcode + +In either cases, %SALOME services access is done with: + +\code +import salome +salome.salome_init() +\endcode + +In the embedded interpreter, it is already done, but there is no problem to +do it several times, so it is preferable to add these instructions +systematically in your scripts, to allow them to work in all configurations. + +
  3. +Container and component instanciation + +See LifeCycleCORBA for the C++ interface (Python interface obtained with SWIG +is very similar). + +In the following example, a test component provided in KERNEL is launched +in the local container, "FactoryServer", created when %SALOME starts: + +\code +import salome +salome.salome_init() + +import LifeCycleCORBA +lcc = LifeCycleCORBA.LifeCycleCORBA() +obj=lcc.FindOrLoad_Component("FactoryServer","SalomeTestComponent") + +import Engines +comp=obj._narrow(Engines.TestComponent) + +comp.Coucou(1) +\endcode + +The answer is something like: + +\code +'TestComponent_i : L = 1' +\endcode + +The _narrow() instruction is not always mandatory in Python, but sometimes +useful to be sure you have got the right type of %object. Here, Testcomponent +interface is defined in CORBA module Engines. With this example, it works also +without the _narrow() instruction: + +\code + obj.Coucou(1) +\endcode + +In the next example, a component instance is created in a specific Container +defined by it's computer hostname and it's name. Here we use the local +computer. Note that in Utils_Identity, getShortHostName() gives the short +hostname of the computer, without domain suffixes, which is used in %SALOME. +The container process is created here if it does not exists, and a new +component instance is created: + +\code +import salome +salome.salome_init() +import LifeCycleCORBA +lcc = LifeCycleCORBA.LifeCycleCORBA() + +import Utils_Identity +host = Utils_Identity.getShortHostName() + +import Engines +params={} +params['hostname']=host +params['container_name']='myContainer' +comp=lcc.LoadComponent(params,'SalomeTestComponent') +comp.Coucou(1) +\endcode + +If you want to get a list of containers and component instances, client %object +from orbmodule provides a list: + +\code +import orbmodule +clt=orbmodule.client() +clt.showNS() +\endcode + +The list looks like: + +\code +Logger. +ContainerManager.object +Containers.dir + cli70ac.dir + FactoryServerPy.object + SuperVisionContainer.object + FactoryServer.object + FactoryServer.dir + SalomeTestComponent_inst_1.object + myContainer.object + myContainer.dir + SalomeTestComponent_inst_1.object + SalomeTestComponent_inst_2.object +Registry.object +Kernel.dir + ModulCatalog.object + Session.object +Study.dir + Study2.object + extStudy_1.object + extStudy_2.object + extStudy_3.object +myStudyManager.object +SalomeAppEngine.object +\endcode + +
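For the C++ interface of LifeCycleCORBA mentioned at the top of this item, the FindOrLoad_Component call could look like the sketch below (the signatures should be checked against SALOME_LifeCycleCORBA.hxx; the container and component names repeat the Python example):

\code
#include "SALOME_NamingService.hxx"
#include "SALOME_LifeCycleCORBA.hxx"

void loadTestComponent(CORBA::ORB_ptr orb)
{
  SALOME_NamingService NS(orb);
  SALOME_LifeCycleCORBA lcc(&NS);

  // same call as the Python example: local container "FactoryServer"
  Engines::Component_var comp =
      lcc.FindOrLoad_Component("FactoryServer", "SalomeTestComponent");

  if (CORBA::is_nil(comp))
    return;  // component could not be found or loaded
  // ... narrow comp to the expected interface and use it ...
}
\endcode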
  5. +File transfer service + +See SALOME_FileTransferCORBA for the C++ interface (Python interface obtained with +SWIG is very similar). + +The following example shows how to tranfer a file from a remote host to the +client computer. Remote hostname is 'cli76cc', we would like to copy +'tkcvs_8_0_3.tar.gz' from remote to local computer. %A full pathname is +required. %A container is created on remote computer if it does not exist, +to handle the file transfer: + +\code +import salome +salome.salome_init() + +import LifeCycleCORBA +remotefile="/home/prascle/tkcvs_8_0_3.tar.gz" +aFileTransfer=LifeCycleCORBA.SALOME_FileTransferCORBA('cli76cc',remotefile) +localFile=aFileTransfer.getLocalFile() +\endcode + +
  7. +CORBA Naming service access + +See SALOME_NamingService for the C++ interface. The Python interface +SALOME_NamingServicePy is not yet derived from the C++ interface and offers +only the most useful functions. + +
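For the C++ interface mentioned above, resolving an object registered in the naming service could look like this sketch (the entry path is an example taken from the listing shown earlier; the constructor and Resolve signatures should be checked against SALOME_NamingService.hxx):

\code
#include "SALOME_NamingService.hxx"

void findContainer(CORBA::ORB_ptr orb)
{
  SALOME_NamingService NS(orb);   // attach to the naming service of the session
  CORBA::Object_var obj = NS.Resolve("/Containers/myhost/FactoryServer");
  if (CORBA::is_nil(obj))
    return;                       // nothing registered under that name
  // ... _narrow() the object to the expected interface ...
}
\endcode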
  9. +Batch services + +See \ref batch_page documentation (in french only). + +
+ +\section S2_kernel_ser All IDL Interfaces + +
  1. +Containers and component life cycle, File transfer service + +- Engines : engines CORBA module. +- Engines::Component : generic component interface. All %SALOME components inherit this interface. +- Engines::Container : host for C++ and Python components components instances +- Engines::fileTransfer : agent for file transfer created by a container copy a local file to a distent client +- Engines::fileRef : reference to a file, used by a container for file transfers +- Engines::ContainerManager : unique instance, in charge of container creation on remote computers +- Engines::MPIContainer : an exemple of parallel implementation for containers and components +- Engines::MPIObject + +
  3. +Study management + +- SALOMEDS : SALOMEDS CORBA module +- SALOMEDS.idl +- SALOMEDS_Attributes.idl + +
  5. +High speed transfer, object life cycle, exceptions, GUI interface... + +- SALOME : %SALOME CORBA module +- SALOME_Comm.idl +- SALOME_GenericObj.idl +- SALOME_Exception +- SALOME_Session.idl + +
  7. +Miscelleanous + +- SALOME_ModuleCatalog +- SALOME_RessourcesCatalog +- SALOME_Registry.idl +- Logger.idl + +Other idl for test purposes +\n +- nstest.idl +- SALOME_TestComponent.idl +- SALOME_TestModuleCatalog.idl +- SALOME_TestMPIComponent.idl +- TestNotif.idl + +*/ diff --git a/doc/salome/main.dox b/doc/salome/main.dox new file mode 100644 index 000000000..c5808dd04 --- /dev/null +++ b/doc/salome/main.dox @@ -0,0 +1,80 @@ +/*! \mainpage SALOME KERNEL Reference Documentation + \image html kernel_about_4.png + + \section S1_main Introduction + + Welcome to the %SALOME KERNEL documentation ! + + Following your kind of usage of %SALOME, you will find some specific + introductory documentation, listed below. + + \section S2_main End user + +
  1. How to configure a %SALOME application
  \n The end user may have to configure his own %SALOME application by selecting a
  subset of the available %SALOME modules. He may also want to install his
  application on several computers.
  See \subpage SALOME_Application to define your own configuration of %SALOME and run it
  on one or several computers. This is the recommended way of configuration.
    3. + How to launch %SALOME in a %SALOME application + \n See \ref SALOME_Application. +
    5. + How to use KERNEL services in Python scripts + \n The %SALOME KERNEL offers a list of services available in Python. See \subpage KERNEL_Services. +
\section S3_main Application Integrator

  Application integrators are in charge of the configuration and installation of
  specific %SALOME applications over a local network. Application integrators
  build %SALOME module binaries from source tarballs.
    1. + How to install %SALOME + \n See \subpage INSTALL for general information on required configuration and + prerequisites, compilation procedure, setting environment principles. +
    3. + How to configure a %SALOME application + \n See \ref SALOME_Application to define your own configuration of %SALOME and run it + on one or several computers. This is the recommended way of configuration. +
\section S4_main Module maintainer

  Module maintainers are in charge of the development and debugging of the %SALOME
  modules. Each %SALOME module is stored in a CVS base. CVS bases are organised
  in separate branches for development and debugging. All official or development
  releases are identified by a CVS tag.
    1. + Source code structuration and Unit Tests + \n See \subpage UnitTests for general information on code directories structure, + unit tests associated to the different kind of classes, and how to run + the unit tests. +
    3. + Some development utilities + \n See \subpage kernel_resources for information on basic utilities for C++ and Python + development, like trace and debug, exceptions, singleton. +
    + + \section S5_main SALOME programming model + + You will find in the next pages informations about + specific points of %SALOME Kernel : + + - \subpage dsc_page : DSC documentation page. + - \subpage batch_page : BATCH documentation page. +*/ + diff --git a/doc/salome/salome_application.dox b/doc/salome/salome_application.dox new file mode 100644 index 000000000..0060c7794 --- /dev/null +++ b/doc/salome/salome_application.dox @@ -0,0 +1,373 @@ +/*! + \page SALOME_Application SALOME Application Concept + + Configuration for one or more computers + + + **WORK in PROGRESS, INCOMPLETE DOCUMENT** + +The following explains how to configure your own application with your list of +modules, how to define and run this application on one or more computers. + +\section S1_sal_appl General principles + +%A %SALOME application is defined by a set of modules (GEOM, SMESH, ASTER...). + +%A %SALOME User can define several %SALOME Applications. These applications are +runnable from the same user account. These applications may share the same +KERNEL and modules. Thus, the application configuration is independant of +KERNEL and must not be put in KERNEL_ROOT_DIR. + +Furthermore, prerequisites may not be the same on all the applications. + +%A %SALOME Session can run on a several computers. + +Binary modules and prerequisites are installed on the different computers. +There is no need to have all the modules on each computer (the minimum is +KERNEL). + +There is no need of standardization or centralised information on the details +of configuration on each computer (PATH, LD_LIBRARY_PATH, environment +variables) provided the application modules are version - compatible. Details +of configuration stay private to the computer, and are held by scripts on each +computer. + +There is no hierarchy between the computers (for example only one master +computer used to launch application). + +The %SALOME user has an account on all the computers. Access between +account@computer is via rsh or ssh and must be configured for use without +password (key exchange for ssh). Account may be different on each +computer. + +\section S2_sal_appl Application Directory + +There is two ways for creation of an application directory + +
  1. First way - references to different module directories

The script createAppli.sh in ${KERNEL_ROOT_DIR}/bin/SALOME creates an
application directory at the path given as parameter. ${APPLI} is a path
relative to ${HOME}.

The directory is only a skeleton; the user has to edit several files to
configure his own application. These files are described below; the list is:

- env.d/atFirst.sh
- env.d/envProducts.sh
- env.d/envSALOME.sh
- CatalogResources.xml
- SALOMEApp.xml
    3. + Second and easiest way - one single virtual install directory + +The user must create a %SALOME application configuration file by modifying a +copy of ${KERNEL_ROOT_DIR}/bin/SALOME/config_appli.xml. +The file describes the list of %SALOME modules used in the application, with +their respective installation path. The configuration file also defines the +path of an existing script which sets the %SALOME prerequisites, +and optionnaly, the path of samples directory (SAMPLES_SRC). +The following command:: + +\code +python /bin/SALOME/appli_gen.py --prefix= --config= +\endcode + +creates a virtual installation of %SALOME in the application directory ${APPLI} +(bin, lib, doc, share...), with, for each file (executable, script, data, +library, resources...), symbolic links to the actual file. + +Providing an existing an existing script for %SALOME prerequisites (the same one +used for modules compilation, or given with the modules installation), the +installation works without further modification for a single computer (unless +some modules needs a special environment not defined in the above script). +For a distributed application (several computers), one must copy and adapt +CatalogResources.xml from ${KERNEL_ROOT_DIR}/bin/SALOME/appliskel (see below). +
\section S3_sal_appl General rules

Directory ${APPLI} must be created on each computer of the application.
The easiest way is to use the same relative path (to ${HOME}) on each computer.
(Sometimes it is not possible to use the same path everywhere, for instance
when ${HOME} is shared with NFS, so a different path can be defined on each
computer.)

The ${APPLI} directory contains scripts for environment and runs. Environment
scripts must be configured (by the user) on each computer. All the environment
scripts are in the ${APPLI}/env.d directory.

The script ${APPLI}/envd sources **all** the files (\*.sh) in ${APPLI}/env.d
in alphanumeric order (after editing them, remember to remove backup files). The envd
script is used by the run scripts.
    1. +env.d scripts + +With the first way of installation, each user **must define** his own +configuration for these scripts, following the above rules. +With the virtual installation (second way, above), env.d +scripts are built automatically. + + **The following is only an example proposed by createAppli.sh, (first way of installation) not working as it is**. + +- atFirst.sh + Sets the computer configuration not directly related to %SALOME, + like useful tools, default PATH. + +- envProducts.sh + Sets the %SALOME prerequisites. + +- envSALOME.sh + Sets all the MODULE_ROOT_DIR that can be used in the %SALOME application. + + SALOMEAppConfig is also defined by: + +\code +export SALOMEAppConfig=${HOME}/${APPLI} +\endcode + + where SALOMEAppConfig designates the directory containing SALOMEApp.xml. + Note that ${APPLI} is already defined by the calling scripts when + env.d/envSALOME.sh is sourced. +
    3. +User run scripts + +The %SALOME user can use 4 scripts: + +- runAppli + Launches a %SALOME Session + (similar to ${KERNEL_ROOT_DIR}/bin/SALOME/runSALOME but with a different + name to avoid confusions). + +- runSession + Launches a shell script in the %SALOME application environment, with access + to the current (last launched) %SALOME session (naming service), if any. + Without arguments, the script is interactive. With arguments, the script + executes the command in the %SALOME application environment. + +- runConsole + Gives a python console connected to the current %SALOME Session. + It is also possible to use runSession, then python. + +- runTests + Similar to runSession, used for unit testing. runSession tries to use an + already existing naming service definition from a running session (hostname + and port number), runTests defines a new configuration for naming service + (new port number). +
  5. %SALOME internal run scripts

- envd
  Sets the %SALOME application environment; envd is sourced by the other scripts.

For remote calls, %SALOME uses one script.

- runRemote.sh
  This script is mainly used to launch containers. The first 2 arguments
  define the hostname and port used by the naming service, the remaining
  arguments define the command to execute.
    7. +Other configuration files + +- SALOMEApp.xml + This file is similar to the default given + in ${GUI_ROOT_DIR}/share/SALOME/resources/gui + + +- CatalogRessources.xml + This files describes all the computers the application can use. The given + example is minimal and suppose ${APPLI} is the same relative path + to ${HOME}, on all the computers. %A different directory can be set on a + particular computer with a line: + +\code +appliPath="my/specific/path/on/this/computer" +\endcode + +
    + +\section S4_sal_appl Examples of use + +
    1. +Launch a %SALOME session with a GUI interface + +Launch is done with a command like:: + +\code +./runAppli --logger +\endcode + +The --logger option means here : collect all the traces from the all the +distributed process, via CORBA, in a single file : logger.log. + +There are a lot of options, a complete list is given by:: + +\code +./runAppli --help +\endcode + +Note that, without argument, runAppli is a non interactive Python application, +and, with arguments, runAppli is an interactive Python interpreter. + +Several options are already defined by default in SALOMEApp.xml files. Optional +arguments given in the command override the SALOMEApp.xml configuration. + +Several sessions can run simultaneously, each session use a different port for +CORBA naming service, so the sessions are totally separated from each other. + +When the GUI is closed, the different %SALOME servers are still running. +
    3. +Close a %SALOME session, kill all the servers + +Inside the interactive python interpreter you get when you use runAppli +with arguments, you can kill all the servers of your session with:: + +\code +>>> killLocalPort() +\endcode + +or the servers of all the sessions with:: + +\code +>>> killAllPorts() +\endcode + +If you have no active Python interpreter connected to your session, you can +kill all the %SALOME servers of **all the sessions** on a given computer:: + +\code +./runSession killSALOME.py +\endcode + +Remember! it's the same idea in *Windows (R) operating system* (Microsoft and Windows are either registered trademarks or trademarks of + Microsoft Corporation in the United States and/or other countries) : +use the start menu to stop... + +When you use only one session at a time, you don't need more. + +To kill a given session (when several session are running), one needs +the naming service port number:: + +\code +./runSession killSALOMEWithPort 2810 +\endcode + +Note that the port number of the last launched session can be found on Linux, +in the prompt, within a runSession shell (see below). + +It is also possible to get the Naming Service host and port number of +the last launched session with:: + +\code +./runSession NSparam.py +\endcode + +
    5. +Launch a %SALOME session without GUI interface + +This is used to launch a %SALOME Python script without GUI +(no GUI %server = SALOME_session_server) + +Example of script (test_session_geom.py): + +\code +import SALOME_session +SALOME_session.startSession(modules=["GEOM"]) +import GEOM_usinggeom +raw_input("Press a key and the servers will be killed ...") +\endcode + +This script is run in a non interactive way with:: + +\code +./runSession python test_session_geom.py +\endcode + +All the process are automatically killed when Python is closed +(with SALOME_session delete). +
    7. +Add an external Python interpretor to a running session + +It's often easier to develop and try Python scripts outside the GUI embedded +Python interpreter. Imagine, for instance, you are writing a script involving +geometry and mesh modules. +first, launch a %SALOME session with gui, then, on another terminal:: + +\code +./runSession +python +\endcode + +Import %SALOME module. SALOME_init() without arguments creates a new study +in the running session (note: SALOME_init(n) attachs to a running session whose +studyId is n):: + +\code +import SALOME +SALOME.SALOME_init() +\endcode + +An example of script given with SMESH:: + +\code +import ex01_cube2build +\endcode + +It is possible to connect the GUI interface to the study created in the above +script with the file/connect menu, then browse study and display objects. +Further modifications on study can be done either with GUI or external script +(use refresh popup in GUI %object browser to see study modifications generated +by the external script). **AVOID modifications with GUI when a Python script +is running**. Not all the modules are protected against concurrent actions... +
    9. +Different uses of the runSession shell interpreter + +runSession invoked without arguments gives an interactive shell with the full +environment of %SALOME (PATH, LD_LIBRARY_PATH, PYTHONPATH, other variables). +If there are running sessions of the same %SALOME application, runSession +connects to the last launched session (i.e. gets the naming service references +of the session: hostname and port) + +On Linux, the shell prompt (bash) gives information on naming service +references, hostname and port:: + +\code +[NS=cli76cc:2811]prascle@cli76cc:~/SALOME2/Run/Virtual$ +\endcode + +If there is no running session, prompt looks like:: + +\code +[NS=:]prascle@cli76cc:~/SALOME2/Run/Virtual$ +\endcode + +runSession is useful to launch any script or program which needs the complete +%SALOME environment, with or without a session already running. +For instance, to launch the ddd debugger interface on the gui %server, first +launch a %SALOME session with gui, then, on another terminal:: + +\code +./runSession ddd +\endcode + +Then attach to the running SALOME_Session_Server process. +
    + +*/ diff --git a/doc/salome/tui/KERNEL/doxyuser.in b/doc/salome/tui/KERNEL/doxyuser.in index 4b407f61b..d3387500b 100644 --- a/doc/salome/tui/KERNEL/doxyuser.in +++ b/doc/salome/tui/KERNEL/doxyuser.in @@ -77,6 +77,7 @@ WARN_LOGFILE = log.txt # configuration options related to the input files #--------------------------------------------------------------------------- INPUT = \ + @top_srcdir@/doc/salome \ @top_srcdir@/bin \ @top_srcdir@/idl \ @top_srcdir@/src/Container \ @@ -85,7 +86,7 @@ INPUT = \ @top_srcdir@/src/Notification \ @top_srcdir@/src/Utils \ @top_srcdir@/src/DSC -FILE_PATTERNS = *.idl *.hxx *.cxx python_extension_must_be_here +FILE_PATTERNS = *.dox *.idl *.hxx *.cxx python_extension_must_be_here RECURSIVE = YES EXCLUDE = EXCLUDE_SYMLINKS = NO diff --git a/doc/salome/tui/KERNEL/sources/footer.html b/doc/salome/tui/KERNEL/sources/footer.html index 8aa47dfe0..56d52a61a 100755 --- a/doc/salome/tui/KERNEL/sources/footer.html +++ b/doc/salome/tui/KERNEL/sources/footer.html @@ -1,15 +1,10 @@ - - - + + + + + +
    Copyright © 2003-2007 CEA, EDF
    + + diff --git a/doc/salome/tui/KERNEL/sources/kernel_about_4.png b/doc/salome/tui/KERNEL/sources/kernel_about_4.png new file mode 100644 index 000000000..5c9c09a79 Binary files /dev/null and b/doc/salome/tui/KERNEL/sources/kernel_about_4.png differ diff --git a/doc/salome/tui/KERNEL/sources/myheader.html b/doc/salome/tui/KERNEL/sources/myheader.html index 25bfe5ec3..bddc997ec 100755 --- a/doc/salome/tui/KERNEL/sources/myheader.html +++ b/doc/salome/tui/KERNEL/sources/myheader.html @@ -2,22 +2,12 @@ - - Main Page - - + $title + + - -  - - - - - - - -
      General KERNEL Documentation    End User KERNEL Services  
    +SALOME documentation central +
    diff --git a/doc/salome/tui/Makefile.am b/doc/salome/tui/Makefile.am index 07b894756..055ae5c17 100644 --- a/doc/salome/tui/Makefile.am +++ b/doc/salome/tui/Makefile.am @@ -48,13 +48,19 @@ usr_docs: fi; \ $(DOXYGEN) ./doxyuser1; \ cd ..; - $(INSTALL) -d $(docdir)/tui/KERNEL; - cp -fr $(srcdir)/KERNEL/sources/static/*.* $(docdir)/tui/KERNEL; - cp -fr $(srcdir)/KERNEL/sources/ $(docdir)/tui/KERNEL; - cp -fr KERNEL $(docdir)/tui - cp -fr $(srcdir)/KERNEL/HTML/ $(docdir)/tui/KERNEL; - cp -f $(srcdir)/pythfilter.py $(docdir)/tui/KERNEL; - cp -fr $(srcdir)/KERNEL/exemple/ $(docdir)/tui/KERNEL; +# $(INSTALL) -d $(docdir)/tui/KERNEL; + cp -fr $(srcdir)/KERNEL/sources/static/*.* $(docdir); + cp -fr $(srcdir)/KERNEL/sources/ $(docdir); + cp -fr KERNEL/*.* $(docdir) + cp -fr $(srcdir)/KERNEL/HTML/ $(docdir); + cp -f $(srcdir)/pythfilter.py $(docdir); + cp -fr $(srcdir)/KERNEL/exemple/ $(docdir); +# cp -fr $(srcdir)/KERNEL/sources/static/*.* $(docdir)/tui/KERNEL; +# cp -fr $(srcdir)/KERNEL/sources/ $(docdir)/tui/KERNEL; +# cp -fr KERNEL $(docdir)/tui +# cp -fr $(srcdir)/KERNEL/HTML/ $(docdir)/tui/KERNEL; +# cp -f $(srcdir)/pythfilter.py $(docdir)/tui/KERNEL; +# cp -fr $(srcdir)/KERNEL/exemple/ $(docdir)/tui/KERNEL; dev_docs: cp -fr $(srcdir)/KERNEL/* ./INPUT; \ diff --git a/doc/salome/unittests.dox b/doc/salome/unittests.dox new file mode 100644 index 000000000..fc5ebdd47 --- /dev/null +++ b/doc/salome/unittests.dox @@ -0,0 +1,207 @@ +/*! + +\page UnitTests Source code structuration and Unit Tests + +WORK in PROGRESS, INCOMPLETE DOCUMENT + +You will find here general information on code directories structure, +unit tests associated to the different kind of classes, and how to run +the unit tests. + +\section S1_unit SALOME KERNEL source code structuration + +
    1. General structure of KERNEL_SRC + +- KERNEL_SRC : + Some README files and configuration tools for build + +- KERNEL_SRC/adm_local : + Part of the configuration files, other modules have a directory with the + same name. Not used in KERNEL. + +- KERNEL_SRC/bin : + Python and shell scripts used at run time. + Kit to install a %SALOME Application. + +- KERNEL_SRC/doc : + Kit for KERNEL end user documentation production: + public interfaces, Python, CORBA. + Integrator and Developper documentation. + +- KERNEL_SRC/idl : + All CORBA interfaces from KERNEL are regrouped here. + +- KERNEL_SRC/resources : + Configuration files for servers (examples). + Interfaces definitions for KERNEL test components. + +- KERNEL_SRC/salome_adm : + Configuration files used by autotools (M4 macros & co.) + +- KERNEL_SRC/src : + The source code (C++ and Python) + +
    3. +Directory src: C++ and Python source code + +
      1. +Basic services non related to CORBA + +- Basics + %A set of general purpose C++ services, not related to CORBA. + Some general purpose services that are in Utils directory (CORBA related), + are progressivley moved here, as they are not related to CORBA. + + +- SALOMELocalTrace + %A multithread trace system that allows message tracing on standard error + or a file. + +- CASCatch + Exceptions and signal handler. + +- HDFPersist + %A C++ interface to HDF. + +
      3. +Basic CORBA services + +- Logger : + %A CORBA %server that collects the trace messages from differents CORBA + process. + +- SALOMETraceCollector : + %A multithread trace system derived from SALOMELocalTrace, that sends messages + to Logger %server via CORBA. + +- Utils : + %A set of general purpose services related to CORBA, such as basic CORBA + exception system. See also Basics directory above. + +- NamingService : + C++ and Python interfaces to name, store and retrieve CORBA objects + +- GenericObj : + %A generic CORBA interface for CORBA objects, to count distributed references, + and to allow destruction by client. + +
      5. +Miscellaneous CORBA servers + +- %Registry : + Implements SALOME_registry.idl. + Provides a CORBA %server library and a separate %server program. + +- ModuleCatalog : + Implements SALOME_moduleCatalog.idl. + Provide a CORBA %server library and separate %server and client programs. + +- ModuleGenerator : + Tool to generate a module catalog from CORBA idl + +- ResourcesManager : + library included in container %server + +- Notification : + library included in differents servers (container) + +- NOTIFICATION_SWIG + +
      7. +CORBA Containers for %SALOME Modules + +- Container + +- TestContainer + +- LifeCycleCORBA + +- LifeCycleCORBA_SWIG + +
      9. +STUDY %server and related interfaces and tools + +- SALOMEDSClient + +- TOOLSDS + +- SALOMEDSImpl + +- SALOMEDS + +
      11. +Python interface to %SALOME + +- KERNEL_PY + +
      13. +Efficient CORBA transfer services + +- Communication + +- Communication_SWIG + +
      15. +%A Parallel container with MPI + +- MPIContainer + +- TestMPIContainer + +
      17. +Batch interface library + +- Batch + +- Batch_SWIG + +
      19. +Unit tests + +- UnitTests + +
    + +\section S2_unit Tools and principles used for Unit testing + +**TO BE COMPLETED** + +Unit Testing rely on cppunit package for C++ testing, and on unittest module +for Python. See these products for general principles of unit testing. + +The cppunit package is optional. When the prerequisite is detected, the unit +tests are compiled. + +Unit Tests sources are in directories Test under the src/directories +containing the classes to test. + +Test are ordered following the order of directories given above. + +Tests can be run as a whole, or for a particular directory. In this case, only +a partial test is run (the classes to test, and the classes used, i.e. the +preceding test directories). + + +Today, only some tests are written as an example. There are not yet python +scripts in KERNEL_SRC, but it's a matter of days, there are working scripts +to test LifeCycleCORBA_SWIG interface. + +*/ diff --git a/idl/SALOMEDS.idl b/idl/SALOMEDS.idl index f4aea4bab..8538f9875 100644 --- a/idl/SALOMEDS.idl +++ b/idl/SALOMEDS.idl @@ -23,10 +23,6 @@ // Author : Yves FRICAUD // $Header$ -/*! \mainpage - \image html Application-About1.jpg -*/ - /*! \file SALOMEDS.idl This file contains a set of interfaces used for creation, managment and modification of the %Study diff --git a/src/DSC/DSC.dox b/src/DSC/DSC.dox new file mode 100644 index 000000000..e1a254625 --- /dev/null +++ b/src/DSC/DSC.dox @@ -0,0 +1,9 @@ +/*! + +\page dsc_page DSC + +DSC means Dynamic Software Component. + +*/ + + diff --git a/src/DSC/Makefile.am b/src/DSC/Makefile.am index edc4849c7..dc9af3cb8 100644 --- a/src/DSC/Makefile.am +++ b/src/DSC/Makefile.am @@ -28,3 +28,5 @@ SUBDIR_PAR = ParallelDSC endif SUBDIRS = DSC_Basic DSC_User $(SUBDIR_PAR) DSC_Python + +EXTRA_DIST= DSC.dox