- mem_mb -> Memory expressed in megabytes.
- nb_proc -> Number of Processors.
*/
- ResourceParametersContainer resource_required;
+ ResourceParametersJob resource_required;
//! Name of the batch queue chosen - optional
string queue;
//! Specifies if the job must run in exclusive mode (without sharing nodes with other jobs)
boolean exclusive;
- //! Specifies the memory limit per cpu (exclusive with resource_required.mem_mb)
+ //! Specifies the memory limit per cpu
unsigned long mem_per_cpu;
//! Workload Characterization Key - mandatory on some clusters
job_parameters.wckey = job->getWCKey();
job_parameters.extra_params = job->getExtraParams();
- resourceParamsContainer resource_params = job->getResourceRequiredParams();
- job_parameters.resource_required.name = resource_params.name;
- job_parameters.resource_required.hostname = resource_params.hostname;
- job_parameters.resource_required.OS = resource_params.OS;
- job_parameters.resource_required.nb_proc = resource_params.nb_proc;
- job_parameters.resource_required.nb_node = resource_params.nb_node;
- job_parameters.resource_required.nb_proc_per_node = resource_params.nb_proc_per_node;
- job_parameters.resource_required.cpu_clock = resource_params.cpu_clock;
- job_parameters.resource_required.mem_mb = resource_params.mem_mb;
-
- job_parameters.specific_parameters = job->getSpecificParameters();
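+ // Copy the whole resource structure in a single assignment instead of field by field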
+ job_parameters.resource_required = job->getResourceRequiredParams();
+
+ job_parameters.specific_parameters = job->getSpecificParameters();
return job_parameters;
}
for(size_t i=0; i < job_params.OutputFile.size();i++)
new_job->add_out_file(job_params.OutputFile[i]);
- resourceParamsContainer p;
+ resourceParamsJob p;
p.hostname = clusterName;
p.name = "";
- p.OS = "";
- p.nb_proc = job_params.NbOfProcesses;
- p.nb_node = 0;
- p.nb_proc_per_node = 0;
- p.cpu_clock = 0;
- p.mem_mb = 0;
+ // p.OS = "";
+ // p.nb_proc = job_params.NbOfProcesses;
+ // p.nb_node = 0;
+ // p.nb_proc_per_node = 0;
+ // p.cpu_clock = 0;
+ // p.mem_mb = 0;
new_job->setResourceRequiredParams(p);
createJob(new_job.get());
// Select a resource for the job
std::vector<std::string> ResourceList;
- resourceParamsContainer params = job->getResourceRequiredParams();
+ resourceParamsJob params = job->getResourceRequiredParams();
// Consider only resources that can launch batch jobs
try
{
- ResourceList = _ResManager->GetFittingResourcesContainer(params);
+ ResourceList = _ResManager->GetFittingResourcesJob(params);
}
catch(const ResourcesException &ex)
{
std::string local_directory;
std::string result_directory;
std::string maximum_duration;
- resourceParamsContainer resource_required;
+ resourceParamsJob resource_required;
std::string queue;
std::string partition;
bool exclusive;
}
void
-Launcher::Job::setResourceRequiredParams(const resourceParamsContainer& resource_required_params)
+Launcher::Job::setResourceRequiredParams(const resourceParamsJob& resource_required_params)
{
checkResourceRequiredParams(resource_required_params);
_resource_required_params = resource_required_params;
return _launcher_args;
}
-resourceParamsContainer
+resourceParamsJob
Launcher::Job::getResourceRequiredParams() const
{
return _resource_required_params;
}
void
-Launcher::Job::checkResourceRequiredParams(const resourceParamsContainer& resource_required_params)
+Launcher::Job::checkResourceRequiredParams(const resourceParamsJob& resource_required_params)
{
// TODO: check if we need this check for a job:
  // nb_proc has to be > 0
void add_in_file(const std::string & file);
void add_out_file(const std::string & file);
void setMaximumDuration(const std::string & maximum_duration);
- void setResourceRequiredParams(const resourceParamsContainer & resource_required_params);
+ void setResourceRequiredParams(const resourceParamsJob & resource_required_params);
void setQueue(const std::string & queue);
void setPartition(const std::string & partition);
void setEnvFile(const std::string & env_file);
const std::list<std::string> & get_in_files() const;
const std::list<std::string> & get_out_files() const;
std::string getMaximumDuration() const;
- resourceParamsContainer getResourceRequiredParams() const;
+ resourceParamsJob getResourceRequiredParams() const;
std::string getQueue() const;
std::string getPartition() const;
std::string getEnvFile() const;
// Checks
void checkMaximumDuration(const std::string & maximum_duration);
- void checkResourceRequiredParams(const resourceParamsContainer & resource_required_params);
+ void checkResourceRequiredParams(const resourceParamsJob & resource_required_params);
// Helps
long convertMaximumDuration(const std::string & maximum_duration);
std::map<std::string, std::string> _specific_parameters;
std::string _maximum_duration;
long _maximum_duration_in_second;
- resourceParamsContainer _resource_required_params;
+ resourceParamsJob _resource_required_params;
std::string _queue;
std::string _partition;
bool _exclusive;
}
// Resource part
- resourceParamsContainer resource_params = job.getResourceRequiredParams();
+ const resourceParamsJob resource_params = job.getResourceRequiredParams();
xmlNodePtr res_node = addNode(node, "resource_params", "");
addNode(res_node, "name", resource_params.name);
if (!resource_params.hostname.empty())
void
XML_Persistence::parseResourceNode(Job * new_job, xmlNodePtr res_node)
{
- resourceParamsContainer p;
+ resourceParamsJob p;
xmlNodePtr current_node = xmlFirstElementChild(res_node);
while (current_node != NULL)
{
p.name = getNodeContent(current_node);
else if (node_name == "hostname")
p.hostname = getNodeContent(current_node);
- else if (node_name == "OS")
- p.OS = getNodeContent(current_node);
- else if (node_name == "nb_proc")
- p.nb_proc = getNumericalNodeContent<long>(current_node);
- else if (node_name == "nb_node")
- p.nb_node = getNumericalNodeContent<long>(current_node);
- else if (node_name == "nb_proc_per_node")
- p.nb_proc_per_node = getNumericalNodeContent<long>(current_node);
- else if (node_name == "cpu_clock")
- p.cpu_clock = getNumericalNodeContent<long>(current_node);
- else if (node_name == "mem_mb")
- p.mem_mb = getNumericalNodeContent<long>(current_node);
- else if (node_name == "mem_per_cpu")
- new_job->setMemPerCpu(getNumericalNodeContent<long>(current_node));
+ // else if (node_name == "OS")
+ // p.OS = getNodeContent(current_node);
+ // else if (node_name == "nb_proc")
+ // p.nb_proc = getNumericalNodeContent<long>(current_node);
+ // else if (node_name == "nb_node")
+ // p.nb_node = getNumericalNodeContent<long>(current_node);
+ // else if (node_name == "nb_proc_per_node")
+ // p.nb_proc_per_node = getNumericalNodeContent<long>(current_node);
+ // else if (node_name == "cpu_clock")
+ // p.cpu_clock = getNumericalNodeContent<long>(current_node);
+ // else if (node_name == "mem_mb")
+ // p.mem_mb = getNumericalNodeContent<long>(current_node);
+ // else if (node_name == "mem_per_cpu")
+ // new_job->setMemPerCpu(getNumericalNodeContent<long>(current_node));
else
throw LauncherException(string("invalid node \"") + node_name + "\"");
current_node = xmlNextElementSibling(current_node);
result.result_directory = job_parameters.result_directory.in();
result.maximum_duration = job_parameters.maximum_duration.in();
- result.resource_required = resourceParametersContainer_CORBAtoCPP(job_parameters.resource_required);
+ result.resource_required = resourceParametersJob_CORBAtoCPP(job_parameters.resource_required);
result.queue = job_parameters.queue.in();
result.partition = job_parameters.partition.in();
result->result_directory = CORBA::string_dup(job_parameters.result_directory.c_str());
result->maximum_duration = CORBA::string_dup(job_parameters.maximum_duration.c_str());
- result->resource_required = resourceParametersContainer_CPPtoCORBA(job_parameters.resource_required);
+ result->resource_required = resourceParametersJob_CPPtoCORBA(job_parameters.resource_required);
result->queue = CORBA::string_dup(job_parameters.queue.c_str());
result->partition = CORBA::string_dup(job_parameters.partition.c_str());
salome.salome_init()
launcher = salome.naming_service.Resolve('/SalomeLauncher')
job_params = salome.JobParameters()
- job_params.resource_required = salome.ResourceParametersContainer()
+ job_params.resource_required = salome.ResourceParametersJob()
job_params.resource_required.name = "localhost"
- job_params.resource_required.nb_proc = 1 # slurm: --ntasks
job_params.job_type = "command"
#cwd = os.getcwd()
def create_JobParameters(self):
job_params = salome.JobParameters()
job_params.wckey="P11N0:SALOME" #needed by edf clusters
- job_params.resource_required = salome.ResourceParametersContainer()
- job_params.resource_required.nb_proc = 1
+ job_params.resource_required = salome.ResourceParametersJob()
return job_params
##############################
std::string local_directory;
std::string result_directory;
std::string maximum_duration;
- resourceParamsContainer resource_required;
+ resourceParamsJob resource_required;
std::string queue;
std::string partition;
bool exclusive;
return jp
def createResourceParameters():
- return pylauncher.resourceParamsContainer()
+ return pylauncher.resourceParamsJob()
# Test of SalomeLauncher.
# This test should be run in the salome environment, using "salome shell".
# Get the list of possible resources
ressource_param = createResourceParameters()
rm = createResourcesManager()
- cls.ressources = rm.GetFittingResourcesContainer(ressource_param)
+ cls.ressources = rm.GetFittingResourcesJob(ressource_param)
def verifyFile(self, path, content):
try:
def create_JobParameters(self):
job_params = createJobParameters()
job_params.wckey="P11U5:CARBONES" #needed by edf clusters
- job_params.resource_required.nb_proc = 1
return job_params
##############################
jp.job_file = "/home/I35256/salome/scripts/job_sh/script.sh"
jp.work_directory = "/tmp/wd"
jp.result_directory = "/tmp/rd"
-rp = pylauncher.resourceParamsContainer()
+rp = pylauncher.resourceParamsJob()
rp.name="localhost"
rp.hostname="localhost"
-rp.nb_proc = 1
jp.resource_required = rp
launcher = pylauncher.Launcher_cpp()
# no catalog. localhost is defined anyway
Engines.ResourceParametersContainer.__init__(self, name, hostname,
policy, resList,
OS, componentList, nb_proc, mem_mb, cpu_clock, nb_node, nb_proc_per_node)
+
+class ResourceParametersJob(Engines.ResourceParametersJob):
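+ """Resource parameters used in JobParameters.resource_required."""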
+ def __init__(self, name="", hostname="",
+ policy="", resList = None):
+ if resList is None:
+ resList = []
+ Engines.ResourceParametersJob.__init__(self, name, hostname,
+ policy, resList)
class JobParameters (Engines.JobParameters):
def __init__(self, job_name="", job_type="", job_file="", pre_command="", env_file="", in_files=None, out_files=None,
if specific_parameters is None:
specific_parameters = []
if resource_required is None:
- resource_required = ResourceParametersContainer()
+ resource_required = ResourceParametersJob()
Engines.JobParameters.__init__(self, job_name, job_type, job_file, pre_command, env_file, in_files, out_files,
work_directory, local_directory, result_directory, maximum_duration,
resource_required, queue, partition, exclusive, mem_per_cpu,
// }
}
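+ // Collect the key (resource name) of every entry in the given resource container.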
+ template<typename T> ResourceList GetAllResources(const T& resources)
+ {
+ ResourceList result;
+ for (const auto& res : resources)
+ {
+ result.push_back(res.first);
+ }
+
+ return result;
+ }
+
template<typename T> ResourceList GetResourcesByHostname(const std::string& hostnameIn, const T& resourceList)
{
if (hostnameIn.empty())
throw ResourcesException("Resource name was not found in resource list! Requested name: " + params.name);
}
+ if (params.hostname.empty())
+ {
+ // Use all available resources
+ return GetAllResources(_resourcesListJob);
+ }
+
// Step 3
ResourceList result = GetResourcesByHostname(params.hostname, _resourcesListJob);