For libbatch 2.4.
//! Type of the job.
/*! There are four supported types:
- "command" : execute #job_file script without %SALOME environment
+ - "command_salome" : execute #job_file script within %SALOME environment
+ (salome shell) but the %SALOME application is not
+ launched
- "python_salome" : execute #job_file python script by %SALOME
- "yacs_file" : execute #job_file by YACS module as a xml YACS schema
*/
*/
string job_file;
+ //! Pre processing script.
+ /*! This script is called on the remote resource, from #work_directory, after
+ the copy of #in_files and before submitting the job.
+ */
+ string pre_command;
+
//! Local path to a script to be sourced in the environment of the job.
/*! It may contain modifications of environment variables.
*/
//! Name of the batch queue chosen - optional
string queue;
-
+
+ //! Name of the partition - optional
+ /*! It can be used only for slurm batch managers.
+ */
+ string partition;
+
//! Specifies if the job must run in exclusive mode (without sharing nodes with other jobs)
boolean exclusive;
/*! Launching the job consists of:
- create the working directory on the remote file system
- copy the input files into the working directory
+ - launch the pre processing command if one is defined
- submit the job to the batch manager
*/
void launchJob (in long job_id) raises (SALOME::SALOME_Exception);
*/
boolean getJobDumpState(in long job_id, in string directory) raises (SALOME::SALOME_Exception);
+ //! Remove the working directory on the remote file system.
+ /*!
+ \param job_id Job id returned by createJob().
+ */
+ void clearJobWorkingDir(in long job_id) raises (SALOME::SALOME_Exception);
+
//! Retrieve one single file from the working directory.
/*! Use this method if you don't want to copy all the results of the job,
for instance if you want to obtain a file which contains the computing
long mem_mb;
//! required frequency
long cpu_clock;
- //! required number of nodes
+ //! required number of nodes. Can be used when submitting slurm jobs.
long nb_node;
//! required number of proc per node
long nb_proc_per_node;
${PROJECT_BINARY_DIR}/idl
)
-ADD_DEFINITIONS(${LIBXML2_DEFINITIONS} ${OMNIORB_DEFINITIONS})
+ADD_DEFINITIONS(${LIBXML2_DEFINITIONS} ${BOOST_DEFINITIONS} ${OMNIORB_DEFINITIONS})
IF(SALOME_USE_LIBBATCH)
ADD_DEFINITIONS(-DWITH_LIBBATCH)
ENDIF(SALOME_USE_LIBBATCH)
SALOMELocalTrace
SALOMEBasics
SalomeIDLKernel
+ ${Boost_FILESYSTEM_LIBRARY}
+ ${Boost_SYSTEM_LIBRARY}
${LIBBATCH_LIBRARIES}
${OMNIORB_LIBRARIES}
${LIBXML2_LIBRARIES}
SALOME_Launcher_Handler.cxx
Launcher_Job.cxx
Launcher_Job_Command.cxx
+ Launcher_Job_CommandSALOME.cxx
Launcher_Job_SALOME.cxx
Launcher_Job_PythonSALOME.cxx
Launcher_Job_YACSFile.cxx
ResourcesManager
Launcher
${LIBBATCH_LIBRARIES}
+ ${Boost_FILESYSTEM_LIBRARY}
+ ${Boost_SYSTEM_LIBRARY}
)
# TestLauncher needs only (and must need only) libxml, libbatch, ResourceManager and Launcher libraries!
Launcher.hxx
Launcher_Job.hxx
Launcher_Job_Command.hxx
+ Launcher_Job_CommandSALOME.hxx
Launcher_Job_PythonSALOME.hxx
Launcher_Job_SALOME.hxx
Launcher_Job_YACSFile.hxx
LAUNCHER_MESSAGE("getJobResult ended");
}
+//=============================================================================
+/*!
+ * Clear the remote working directory
+ * \param job_id id of the job returned by createJob()
+ * \throw LauncherException if the job is unknown or the batch manager
+ *        reports a failure while removing the directory
+ */
+//=============================================================================
+void
+Launcher_cpp::clearJobWorkingDir(int job_id)
+{
+ LAUNCHER_MESSAGE("Clear the remote working directory");
+
+ // Check if job exist
+ std::map<int, Launcher::Job *>::const_iterator it_job = _launcher_job_map.find(job_id);
+ if (it_job == _launcher_job_map.end())
+ {
+ LAUNCHER_INFOS("Cannot find the job, is it created ? job number: " << job_id);
+ throw LauncherException("Cannot find the job, is it created ?");
+ }
+
+ Launcher::Job * job = it_job->second;
+ try
+ {
+ _batchmap[job_id]->clearWorkingDir(*(job->getBatchJob()));
+ }
+ catch(const Batch::GenericException &ex)
+ {
+ // was "getJobResult ..." (copy-paste from getJobResults); report the
+ // operation actually being performed
+ LAUNCHER_INFOS("clearJobWorkingDir is maybe incomplete, exception: " << ex.message);
+ throw LauncherException(ex.message.c_str());
+ }
+ LAUNCHER_MESSAGE("clearJobWorkingDir ended");
+}
+
//=============================================================================
/*!
* Get Job dump state - the result directory could be changed
"(libBatch was not present at compilation time)");
}
+// Stub compiled when libBatch support is disabled: logs and always throws,
+// mirroring the other no-libbatch stubs in this file.
+void
+Launcher_cpp::clearJobWorkingDir(int job_id)
+{
+ LAUNCHER_INFOS("Launcher compiled without LIBBATCH - cannot clear directory!!!");
+ throw LauncherException("Method Launcher_cpp::clearJobWorkingDir is not available "
+ "(libBatch was not present at compilation time)");
+}
+
bool
Launcher_cpp::getJobDumpState(int job_id, std::string directory)
{
const char * getJobState(int job_id);
const char * getAssignedHostnames(int job_id); // Get names or ids of hosts assigned to the job
void getJobResults(int job_id, std::string directory);
+ void clearJobWorkingDir(int job_id);
bool getJobDumpState(int job_id, std::string directory);
bool getJobWorkFile(int job_id, std::string work_file, std::string directory);
void stopJob(int job_id);
//#define _DEBUG_
#include "Launcher_Job.hxx"
#include "Launcher.hxx"
+#include <boost/filesystem.hpp>
#ifdef WITH_LIBBATCH
#include <libbatch/Constants.hxx>
_job_file = "";
_job_file_name = "";
_job_file_name_complete = "";
+ _pre_command = "";
_work_directory = "";
_local_directory = "";
_result_directory = "";
_maximum_duration = "";
_maximum_duration_in_second = -1;
_queue = "";
+ _partition = "";
_job_type = "";
_exclusive = false;
_mem_per_cpu = 0;
_queue = queue;
}
+//! Set the batch partition to submit to - optional, slurm only (see IDL doc).
+void
+Launcher::Job::setPartition(const std::string & partition)
+{
+ _partition = partition;
+}
+
void
Launcher::Job::setExclusive(bool exclusive)
{
return _queue;
}
+//! Return the requested batch partition (empty string if none was set).
+std::string
+Launcher::Job::getPartition() const
+{
+ return _partition;
+}
+
bool
Launcher::Job::getExclusive() const
{
return _reference;
}
+//! Set the local path of the pre-processing script; it is copied to the
+//! remote work directory and run there before the job is submitted.
+void
+Launcher::Job::setPreCommand(const std::string & preCommand)
+{
+ _pre_command = preCommand;
+}
+
+//! Return the local path of the pre-processing script (empty if none).
+std::string
+Launcher::Job::getPreCommand() const
+{
+ return _pre_command;
+}
+
void
Launcher::Job::checkMaximumDuration(const std::string & maximum_duration)
{
params[Batch::NBPROC] = _resource_required_params.nb_proc;
params[Batch::NBPROCPERNODE] = _resource_required_params.nb_proc_per_node;
+ if(_resource_required_params.nb_node > 0)
+ params[Batch::NBNODE] = _resource_required_params.nb_node;
+
// Memory in megabytes
if (_resource_required_params.mem_mb > 0)
{
}
}
params[Batch::WORKDIR] = _work_directory;
+ std::string libbatch_pre_command("");
+ if(!_pre_command.empty())
+ {
+ boost::filesystem::path pre_command_path(_pre_command);
+ libbatch_pre_command += "./" + pre_command_path.filename().string();
+ }
+ params[Batch::PREPROCESS] = libbatch_pre_command;
// Parameters for COORM
params[Batch::LAUNCHER_FILE] = _launcher_file;
in_files.push_back(_job_file);
if (_env_file != "")
in_files.push_back(_env_file);
+ if(!_pre_command.empty())
+ in_files.push_back(_pre_command);
for(std::list<std::string>::iterator it = in_files.begin(); it != in_files.end(); it++)
{
std::string file = *it;
if (_queue != "")
params[Batch::QUEUE] = _queue;
+ // Partition
+ if (_partition != "")
+ params[Batch::PARTITION] = _partition;
+
// Exclusive
if (getExclusive())
params[Batch::EXCLUSIVE] = true;
// Common parameters
void setJobName(const std::string & job_name);
virtual void setJobFile(const std::string & job_file);
+ void setPreCommand(const std::string & preCommand);
void setWorkDirectory(const std::string & work_directory);
void setLocalDirectory(const std::string & local_directory);
void setResultDirectory(const std::string & result_directory);
void setMaximumDuration(const std::string & maximum_duration);
void setResourceRequiredParams(const resourceParams & resource_required_params);
void setQueue(const std::string & queue);
+ void setPartition(const std::string & partition);
void setEnvFile(const std::string & env_file);
void setExclusive(bool exclusive);
void setExclusiveStr(const std::string & exclusiveStr);
void setWCKey(const std::string & wckey);
void setExtraParams(const std::string & extra_params);
void setReference(const std::string & reference);
- // For COORM
- void setLauncherFile(const std::string & launcher_file);
- void setLauncherArgs(const std::string & launcher_args);
+ // For COORM
+ void setLauncherFile(const std::string & launcher_file);
+ void setLauncherArgs(const std::string & launcher_args);
std::string getJobName() const;
std::string getJobFile() const;
+ std::string getPreCommand() const;
std::string getWorkDirectory() const;
std::string getLocalDirectory() const;
std::string getResultDirectory() const;
std::string getMaximumDuration() const;
resourceParams getResourceRequiredParams() const;
std::string getQueue() const;
+ std::string getPartition() const;
std::string getEnvFile() const;
std::string getJobType() const;
bool getExclusive() const;
std::string getExtraParams() const;
std::string getReference() const;
- // For COORM
- std::string getLauncherFile() const;
- std::string getLauncherArgs() const;
+ // For COORM
+ std::string getLauncherFile() const;
+ std::string getLauncherArgs() const;
std::string updateJobState();
std::string _job_file;
std::string _job_file_name;
std::string _job_file_name_complete;
+ std::string _pre_command;
std::string _work_directory;
std::string _local_directory;
long _maximum_duration_in_second;
resourceParams _resource_required_params;
std::string _queue;
+ std::string _partition;
bool _exclusive;
unsigned long _mem_per_cpu;
std::string _wckey;
std::string _extra_params;
std::string _reference; //! Reference of the job for the batch manager
- // Parameters for COORM
- std::string _launcher_file;
- std::string _launcher_args;
+ // Parameters for COORM
+ std::string _launcher_file;
+ std::string _launcher_args;
#ifdef WITH_LIBBATCH
// Connection with LIBBATCH
#include <sstream>
-Launcher::Job_Command::Job_Command() {_job_type = "command";}
+const char Launcher::Job_Command::TYPE_NAME[] = "command";
+
+Launcher::Job_Command::Job_Command()
+{
+ _job_type = Launcher::Job_Command::TYPE_NAME;
+}
Launcher::Job_Command::~Job_Command() {}
std::string::size_type last = _env_file.find_last_of("/");
launch_script_stream << ". ./" << _env_file.substr(last+1) << std::endl;
}
- launch_script_stream << "./" << _job_file_name_complete << " > " << work_directory <<"/logs/command_" << launch_date << ".log 2>&1" << std::endl;
+ launch_script_stream << runCommandString() << std::endl;
// Return
launch_script_stream.flush();
chmod(_job_file.c_str(), 0x1ED);
return launch_script;
}
+
+// Build the shell line that executes the job script from the work directory.
+// Virtual so subclasses (e.g. Job_CommandSALOME) can change how the script
+// is invoked while reusing the rest of the launch script.
+std::string Launcher::Job_Command::runCommandString()
+{
+ std::ostringstream result;
+ result << "./" << _job_file_name_complete;
+ return result.str();
+}
#endif
virtual void update_job();
+ static const char TYPE_NAME[];
+
#ifdef WITH_LIBBATCH
protected:
std::string buildCommandScript(Batch::Parametre params, std::string launch_date);
+ virtual std::string runCommandString();
#endif
};
}
--- /dev/null
+// Copyright (C) 2009-2017 CEA/DEN, EDF R&D, OPEN CASCADE
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// Author: André RIBES - EDF R&D
+//
+#include "Launcher_Job_CommandSALOME.hxx"
+
+#ifdef WITH_LIBBATCH
+#include <libbatch/Constants.hxx>
+#endif
+
+#include <sstream>
+
+// Job type string registered for this class ("command_salome").
+const char Launcher::Job_CommandSALOME::TYPE_NAME[] = "command_salome";
+
+Launcher::Job_CommandSALOME::Job_CommandSALOME()
+{
+ _job_type = Launcher::Job_CommandSALOME::TYPE_NAME;
+}
+
+Launcher::Job_CommandSALOME::~Job_CommandSALOME() {}
+
+
+#ifdef WITH_LIBBATCH
+
+// Build the command that runs the job script through "salome shell" so the
+// SALOME environment is set up, without launching the SALOME application.
+// NOTE(review): assumes _resource_definition.AppliPath points to a SALOME
+// application directory containing the "salome" launcher -- confirm.
+std::string Launcher::Job_CommandSALOME::runCommandString()
+{
+ std::ostringstream result;
+ result << _resource_definition.AppliPath
+ << "/salome shell ./"
+ << _job_file_name_complete;
+ return result.str();
+}
+#endif
--- /dev/null
+// Copyright (C) 2009-2016 CEA/DEN, EDF R&D, OPEN CASCADE
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// Author: André RIBES - EDF R&D
+//
+#ifndef _LAUNCHER_JOB_COMMAND_SALOME_HXX_
+#define _LAUNCHER_JOB_COMMAND_SALOME_HXX_
+
+#include "Launcher_Job_Command.hxx"
+#include "Launcher.hxx"
+
+#ifdef WITH_LIBBATCH
+#include <libbatch/Job.hxx>
+#endif
+
+namespace Launcher
+{
+ //! Job of type "command_salome": runs the job script inside "salome shell"
+ //! on the remote resource (SALOME environment set, application not started).
+ /*! Identical to Job_Command except for the run command; only
+ runCommandString() is overridden.
+ */
+ class LAUNCHER_EXPORT Job_CommandSALOME : virtual public Launcher::Job_Command
+ {
+ public:
+ Job_CommandSALOME();
+ virtual ~Job_CommandSALOME();
+ static const char TYPE_NAME[];
+
+#ifdef WITH_LIBBATCH
+ protected:
+ //! Command executed in the launch script (overrides Job_Command).
+ virtual std::string runCommandString();
+#endif
+ };
+}
+
+#endif
//
#include "Launcher_Job_PythonSALOME.hxx"
+const char Launcher::Job_PythonSALOME::TYPE_NAME[] = "python_salome";
-Launcher::Job_PythonSALOME::Job_PythonSALOME() {_job_type = "python_salome";}
+Launcher::Job_PythonSALOME::Job_PythonSALOME()
+{
+ _job_type = Launcher::Job_PythonSALOME::TYPE_NAME;
+}
Launcher::Job_PythonSALOME::~Job_PythonSALOME() {}
virtual void setJobFile(const std::string & job_file);
virtual void addJobTypeSpecificScript(std::ofstream & launch_script_stream);
+ static const char TYPE_NAME[];
};
}
#include "Launcher_Job_YACSFile.hxx"
#include <sstream>
+const char Launcher::Job_YACSFile::TYPE_NAME[] = "yacs_file";
Launcher::Job_YACSFile::Job_YACSFile()
{
- _job_type = "yacs_file";
+ _job_type = Launcher::Job_YACSFile::TYPE_NAME;
_dumpState = -1;
_yacsDriverOptions = "";
}
virtual void addJobTypeSpecificScript(std::ofstream & launch_script_stream);
virtual void checkSpecificParameters();
+ static const char TYPE_NAME[];
+
protected:
int _dumpState;
std::string _yacsDriverOptions;
#include "Launcher_XML_Persistence.hxx"
#include "Launcher_Job_Command.hxx"
+#include "Launcher_Job_CommandSALOME.hxx"
#include "Launcher_Job_YACSFile.hxx"
#include "Launcher_Job_PythonSALOME.hxx"
addNode(node, "local_directory", job.getLocalDirectory());
if (!job.getResultDirectory().empty())
addNode(node, "result_directory", job.getResultDirectory());
+ if (!job.getPreCommand().empty())
+ addNode(node, "pre_command", job.getPreCommand());
// Parameters for COORM
if (!job.getLauncherFile().empty())
addNode(node, "maximum_duration", job.getMaximumDuration());
if (!job.getQueue().empty())
addNode(node, "queue", job.getQueue());
+ if (!job.getPartition().empty())
+ addNode(node, "partition", job.getPartition());
if (job.getExclusive())
addNode(node, "exclusive", job.getExclusiveStr());
if (job.getMemPerCpu() > 0)
string job_type = getAttrValue(job_node, "type");
if (job_type.empty())
throw LauncherException(string("Invalid job \"") + job_name + "\": type is not defined");
- if (job_type == "command")
+ if (job_type == Launcher::Job_Command::TYPE_NAME)
new_job = new Launcher::Job_Command();
- else if (job_type == "yacs_file")
+ else if (job_type == Launcher::Job_CommandSALOME::TYPE_NAME)
+ new_job = new Launcher::Job_CommandSALOME();
+ else if (job_type == Launcher::Job_YACSFile::TYPE_NAME)
new_job = new Launcher::Job_YACSFile();
- else if (job_type == "python_salome")
+ else if (job_type == Launcher::Job_PythonSALOME::TYPE_NAME)
new_job = new Launcher::Job_PythonSALOME();
else
{
}
else if (node_name == "env_file")
new_job->setEnvFile(getNodeContent(current_node));
+ else if (node_name == "pre_command")
+ new_job->setPreCommand(getNodeContent(current_node));
else if (node_name == "work_directory")
new_job->setWorkDirectory(getNodeContent(current_node));
else if (node_name == "local_directory")
new_job->setMaximumDuration(getNodeContent(current_node));
else if (node_name == "queue")
new_job->setQueue(getNodeContent(current_node));
+ else if (node_name == "partition")
+ new_job->setPartition(getNodeContent(current_node));
else if (node_name == "exclusive")
new_job->setExclusiveStr(getNodeContent(current_node));
else if (node_name == "mem_per_cpu")
#include "Launcher_Job_Command.hxx"
#include "Launcher_Job_YACSFile.hxx"
#include "Launcher_Job_PythonSALOME.hxx"
+#include "Launcher_Job_CommandSALOME.hxx"
#include "utilities.h"
{
std::string job_type = job_parameters.job_type.in();
- if (job_type != "command" && job_type != "yacs_file" && job_type != "python_salome")
- {
- std::string message("SALOME_Launcher::createJob: bad job type: ");
- message += job_type;
- THROW_SALOME_CORBA_EXCEPTION(message.c_str(), SALOME::INTERNAL_ERROR);
- }
-
Launcher::Job * new_job; // It is Launcher_cpp that is going to destroy it
- if (job_type == "command")
+ if (job_type == Launcher::Job_Command::TYPE_NAME)
new_job = new Launcher::Job_Command();
- else if (job_type == "yacs_file")
+ else if (job_type == Launcher::Job_CommandSALOME::TYPE_NAME)
+ new_job = new Launcher::Job_CommandSALOME();
+ else if (job_type == Launcher::Job_YACSFile::TYPE_NAME)
new_job = new Launcher::Job_YACSFile();
- else if (job_type == "python_salome")
+ else if (job_type == Launcher::Job_PythonSALOME::TYPE_NAME)
new_job = new Launcher::Job_PythonSALOME();
+ else
+ {
+ std::string message("SALOME_Launcher::createJob: bad job type: ");
+ message += job_type;
+ THROW_SALOME_CORBA_EXCEPTION(message.c_str(), SALOME::INTERNAL_ERROR);
+ }
// Name
new_job->setJobName(job_parameters.job_name.in());
INFOS(ex.msg.c_str());
THROW_SALOME_CORBA_EXCEPTION(ex.msg.c_str(),SALOME::INTERNAL_ERROR);
}
+ new_job->setPreCommand(job_parameters.pre_command.in());
// Files
std::string env_file = job_parameters.env_file.in();
std::string queue = job_parameters.queue.in();
new_job->setQueue(queue);
+ // Partition
+ std::string partition = job_parameters.partition.in();
+ new_job->setPartition(partition);
+
// Exclusive
new_job->setExclusive(job_parameters.exclusive);
}
}
+//! CORBA wrapper: remove the working directory of a job on the remote
+//! file system. Delegates to Launcher_cpp::clearJobWorkingDir and converts
+//! LauncherException into a SALOME::BAD_PARAM CORBA exception.
+void
+SALOME_Launcher::clearJobWorkingDir(CORBA::Long job_id)
+{
+ try
+ {
+ _l.clearJobWorkingDir(job_id);
+ }
+ catch(const LauncherException &ex)
+ {
+ INFOS(ex.msg.c_str());
+ THROW_SALOME_CORBA_EXCEPTION(ex.msg.c_str(),SALOME::BAD_PARAM);
+ }
+}
+
CORBA::Boolean
SALOME_Launcher::getJobDumpState(CORBA::Long job_id, const char * directory)
{
job_parameters->work_directory = CORBA::string_dup(job->getWorkDirectory().c_str());
job_parameters->local_directory = CORBA::string_dup(job->getLocalDirectory().c_str());
job_parameters->result_directory = CORBA::string_dup(job->getResultDirectory().c_str());
+ job_parameters->pre_command = CORBA::string_dup(job->getPreCommand().c_str());
// Parameters for COORM
job_parameters->launcher_file = CORBA::string_dup(job->getLauncherFile().c_str());
job_parameters->maximum_duration = CORBA::string_dup(job->getMaximumDuration().c_str());
job_parameters->queue = CORBA::string_dup(job->getQueue().c_str());
+ job_parameters->partition = CORBA::string_dup(job->getPartition().c_str());
job_parameters->exclusive = job->getExclusive();
job_parameters->mem_per_cpu = job->getMemPerCpu();
job_parameters->wckey = CORBA::string_dup(job->getWCKey().c_str());
char * getJobState (CORBA::Long job_id);
char * getAssignedHostnames (CORBA::Long job_id); // Get names or ids of hosts assigned to the job
void getJobResults(CORBA::Long job_id, const char * directory);
+ void clearJobWorkingDir(CORBA::Long job_id);
CORBA::Boolean getJobDumpState(CORBA::Long job_id, const char * directory);
CORBA::Boolean getJobWorkFile(CORBA::Long job_id, const char * work_file, const char * directory);
void stopJob (CORBA::Long job_id);
IF(NOT WIN32)
SET(TEST_NAME ${COMPONENT_NAME}_Launcher)
- ADD_TEST(${TEST_NAME} python ${SALOME_TEST_DRIVER} ${TIMEOUT} test_launcher.py)
+ ADD_TEST(${TEST_NAME} python ${SALOME_TEST_DRIVER} 2000 test_launcher.py)
SET_TESTS_PROPERTIES(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}"
# TIMEOUT 500
)
except IOError,ex:
self.fail("IO exception:" + str(ex));
+ # Shared fixture: JobParameters preset with the wckey needed on EDF
+ # clusters and a single-process resource requirement.
+ def create_JobParameters(self):
+ job_params = salome.JobParameters()
+ job_params.wckey="P11U50:CARBONES" #needed by edf clusters
+ job_params.resource_required = salome.ResourceParameters()
+ job_params.resource_required.nb_proc = 1
+ return job_params
+
##############################
# test of python_salome job
##############################
f.close()
local_result_dir = os.path.join(case_test_dir, "result_py_job-")
- job_params = salome.JobParameters()
+ job_params = self.create_JobParameters()
job_params.job_type = "python_salome"
job_params.job_file = job_script_file
job_params.in_files = []
job_params.out_files = ["result.txt", "subdir"]
- job_params.resource_required = salome.ResourceParameters()
- job_params.resource_required.nb_proc = 1
launcher = salome.naming_service.Resolve('/SalomeLauncher')
# job params
local_result_dir = os.path.join(case_test_dir, "result_com_job-")
- job_params = salome.JobParameters()
+ job_params = self.create_JobParameters()
job_params.job_type = "command"
job_params.job_file = script_file
job_params.env_file = env_file
job_params.in_files = [data_file]
job_params.out_files = ["result.txt", "copie"]
job_params.local_directory = case_test_dir
- job_params.resource_required = salome.ResourceParameters()
- job_params.resource_required.nb_proc = 1
# create and launch the job
launcher = salome.naming_service.Resolve('/SalomeLauncher')
f.close()
local_result_dir = os.path.join(case_test_dir, "result_yacs_job-")
- job_params = salome.JobParameters()
+ job_params = self.create_JobParameters()
job_params.job_type = "yacs_file"
job_params.job_file = job_script_file
job_params.env_file = os.path.join(case_test_dir,env_file)
# define the interval between two YACS schema dumps (3 seconds)
import Engines
job_params.specific_parameters = [Engines.Parameter("EnableDumpYACS", "3")]
- job_params.resource_required = salome.ResourceParameters()
- job_params.resource_required.nb_proc = 1
launcher = salome.naming_service.Resolve('/SalomeLauncher')
resManager= salome.lcc.getResourcesManager()
f.close()
local_result_dir = os.path.join(case_test_dir, "result_yacsopt_job-")
- job_params = salome.JobParameters()
+ job_params = self.create_JobParameters()
job_params.job_type = "yacs_file"
job_params.job_file = job_script_file
- #job_params.env_file = os.path.join(case_test_dir,env_file)
job_params.out_files = ["result.txt"]
# define the interval between two YACS schema dumps (3 seconds)
job_params.specific_parameters = [Engines.Parameter("YACSDriverOptions",
"-imynode.i=5 -imynode.d=3.7 -imynode.b=False -imynode.s=lili")]
expected_result="i=5,d=3.7,b=False,s=lili"
- job_params.resource_required = salome.ResourceParameters()
- job_params.resource_required.nb_proc = 1
launcher = salome.naming_service.Resolve('/SalomeLauncher')
resManager= salome.lcc.getResourcesManager()
self.verifyFile(os.path.join(job_params.result_directory, "result.txt"),
expected_result)
+ ############################################
+ # test of command job type with pre_command
+ ############################################
+ # The pre_command creates in.txt in the work directory before submission;
+ # the job script then copies it to result.txt, proving the pre-processing
+ # step ran first.
+ def test_command_pre(self):
+ case_test_dir = os.path.join(TestCompo.test_dir, "command_pre")
+ mkdir_p(case_test_dir)
+
+ # command to be run before the job
+ pre_command = "pre_command.sh"
+ pre_command_text = "echo 'it works!' > in.txt"
+ abs_pre_command_file = os.path.join(case_test_dir, pre_command)
+ f = open(abs_pre_command_file, "w")
+ f.write(pre_command_text)
+ f.close()
+ os.chmod(abs_pre_command_file, 0o755)
+
+ # job script
+ script_file = "myTestScript.py"
+ script_text = """#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+in_f = open("in.txt", "r")
+in_text = in_f.read()
+in_f.close()
+
+f = open('result.txt', 'w')
+f.write(in_text)
+f.close()
+"""
+ abs_script_file = os.path.join(case_test_dir, script_file)
+ f = open(abs_script_file, "w")
+ f.write(script_text)
+ f.close()
+ os.chmod(abs_script_file, 0o755)
+
+ # job params
+ local_result_dir = os.path.join(case_test_dir, "result_com_pre_job-")
+ job_params = self.create_JobParameters()
+ job_params.job_type = "command"
+ job_params.job_file = script_file
+ job_params.pre_command = pre_command
+ job_params.in_files = []
+ job_params.out_files = ["result.txt"]
+ job_params.local_directory = case_test_dir
+
+ # create and launch the job
+ launcher = salome.naming_service.Resolve('/SalomeLauncher')
+ resManager= salome.lcc.getResourcesManager()
+
+ for resource in self.ressources:
+ print "Testing command job on ", resource
+ job_params.result_directory = local_result_dir + resource
+ job_params.job_name = "CommandPreJob_" + resource
+ job_params.resource_required.name = resource
+
+ # use the working directory of the resource
+ resParams = resManager.GetResourceDefinition(resource)
+ wd = os.path.join(resParams.working_directory,
+ "CommandPreJob" + self.suffix)
+ job_params.work_directory = wd
+
+ job_id = launcher.createJob(job_params)
+ launcher.launchJob(job_id)
+ # wait for the end of the job
+ jobState = launcher.getJobState(job_id)
+ print "Job %d state: %s" % (job_id,jobState)
+ while jobState != "FINISHED" and jobState != "FAILED" :
+ time.sleep(3)
+ jobState = launcher.getJobState(job_id)
+ print "Job %d state: %s" % (job_id,jobState)
+ pass
+
+ # verify the results
+ self.assertEqual(jobState, "FINISHED")
+ launcher.getJobResults(job_id, "")
+ self.verifyFile(os.path.join(job_params.result_directory, "result.txt"),
+ "it works!\n")
+
+ #################################
+ # test of command salome job type
+ #################################
+ # The job script imports salome and reads ENV_TEST_VAR set by env_file;
+ # success proves the script ran inside the SALOME environment.
+ def test_command_salome(self):
+ case_test_dir = os.path.join(TestCompo.test_dir, "command_salome")
+ mkdir_p(case_test_dir)
+
+ # job script
+ data_file = "in.txt"
+ script_file = "myEnvScript.py"
+ script_text = """#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import os,sys
+# verify import salome
+import salome
+
+text_result = os.getenv("ENV_TEST_VAR","")
+
+f = open('result.txt', 'w')
+f.write(text_result)
+f.close()
+
+in_f = open("in.txt", "r")
+in_text = in_f.read()
+in_f.close()
+
+os.mkdir("copie")
+f = open(os.path.join("copie",'copie.txt'), 'w')
+f.write(in_text)
+f.close()
+"""
+ abs_script_file = os.path.join(case_test_dir, script_file)
+ f = open(abs_script_file, "w")
+ f.write(script_text)
+ f.close()
+ os.chmod(abs_script_file, 0o755)
+
+ #environement script
+ env_file = "myEnv.sh"
+ env_text = """export ENV_TEST_VAR="expected"
+"""
+ f = open(os.path.join(case_test_dir, env_file), "w")
+ f.write(env_text)
+ f.close()
+
+ # write data file
+ f = open(os.path.join(case_test_dir, data_file), "w")
+ f.write("to be copied")
+ f.close()
+
+ # job params
+ local_result_dir = os.path.join(case_test_dir, "result_comsalome_job-")
+ job_params = self.create_JobParameters()
+ job_params.job_type = "command_salome"
+ job_params.job_file = script_file
+ job_params.env_file = env_file
+ job_params.in_files = [data_file]
+ job_params.out_files = ["result.txt", "copie"]
+ job_params.local_directory = case_test_dir
+
+ # create and launch the job
+ launcher = salome.naming_service.Resolve('/SalomeLauncher')
+ resManager= salome.lcc.getResourcesManager()
+
+ for resource in self.ressources:
+ print "Testing command salome job on ", resource
+ job_params.result_directory = local_result_dir + resource
+ job_params.job_name = "CommandSalomeJob_" + resource
+ job_params.resource_required.name = resource
+
+ # use the working directory of the resource
+ resParams = resManager.GetResourceDefinition(resource)
+ wd = os.path.join(resParams.working_directory,
+ "CommandSalomeJob" + self.suffix)
+ job_params.work_directory = wd
+
+ job_id = launcher.createJob(job_params)
+ launcher.launchJob(job_id)
+ # wait for the end of the job
+ jobState = launcher.getJobState(job_id)
+ print "Job %d state: %s" % (job_id,jobState)
+ while jobState != "FINISHED" and jobState != "FAILED" :
+ time.sleep(3)
+ jobState = launcher.getJobState(job_id)
+ print "Job %d state: %s" % (job_id,jobState)
+ pass
+
+ # verify the results
+ self.assertEqual(jobState, "FINISHED")
+ launcher.getJobResults(job_id, "")
+ self.verifyFile(os.path.join(job_params.result_directory, "result.txt"),
+ "expected")
+ self.verifyFile(os.path.join(job_params.result_directory,
+ "copie",'copie.txt'),
+ "to be copied")
+
+ # verify getJobWorkFile
+ mydir = os.path.join(case_test_dir, "work_dir" + resource)
+ success = launcher.getJobWorkFile(job_id, "result.txt", mydir)
+ self.assertEqual(success, True)
+ self.verifyFile(os.path.join(mydir, "result.txt"), "expected")
+
+ success = launcher.getJobWorkFile(job_id, "copie", mydir)
+ self.assertEqual(success, True)
+ self.verifyFile(os.path.join(mydir, "copie", "copie.txt"),
+ "to be copied")
+ pass
+ pass
+ pass
+
if __name__ == '__main__':
# create study
import salome
nb_proc_per_node, policy, resList)
class JobParameters (Engines.JobParameters):
- def __init__(self, job_name="", job_type="", job_file="", env_file="", in_files=None, out_files=None,
+ def __init__(self, job_name="", job_type="", job_file="", pre_command="", env_file="", in_files=None, out_files=None,
work_directory="", local_directory="", result_directory="", maximum_duration="",
- resource_required=None, queue="", exclusive = False, mem_per_cpu = 0,
+ resource_required=None, queue="", partition="", exclusive = False, mem_per_cpu = 0,
wckey = "", extra_params = "",
specific_parameters=None, launcher_file = "", launcher_args = ""):
if in_files is None:
out_files = []
if specific_parameters is None:
specific_parameters = []
- Engines.JobParameters.__init__(self, job_name, job_type, job_file, env_file, in_files, out_files,
+ Engines.JobParameters.__init__(self, job_name, job_type, job_file, pre_command, env_file, in_files, out_files,
work_directory, local_directory, result_directory, maximum_duration,
- resource_required, queue, exclusive, mem_per_cpu,
+ resource_required, queue, partition, exclusive, mem_per_cpu,
wckey, extra_params,
specific_parameters, launcher_file, launcher_args)