-// Copyright (C) 2007-2012 CEA/DEN, EDF R&D, OPEN CASCADE
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D, OPEN CASCADE
//
-// Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
-// CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
+// Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
+// CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
/*
* BatchManager_Slurm.cxx :
}
// Method to submit a job to the batch manager
- const JobId BatchManager_Slurm::submitJob(const Job & job)
+ const JobId BatchManager_Slurm::runJob(const Job & job)
{
Parametre params = job.getParametre();
const string workDir = params[WORKDIR];
- // export input files on cluster
- exportInputFiles(job);
-
// build command file to submit the job and copy it on the server
string cmdFile = buildCommandFile(job);
// define command to submit batch
- string subCommand = string("cd ") + workDir + "; sbatch " + cmdFile;
+ string subCommand = string("bash -l -c \\\"cd ") + workDir + "; sbatch " + cmdFile + "\\\"";
string command = _protocol.getExecCommand(subCommand, _hostname, _username);
command += " 2>&1";
LOG(command);
ofstream tempOutputFile;
string tmpFileName = Utils::createAndOpenTemporaryFile("slurm-script", tempOutputFile);
- tempOutputFile << "#!/bin/bash" << endl;
+ tempOutputFile << "#!/bin/bash -l" << endl;
tempOutputFile << "#SBATCH --output=" << workDir << "/logs/output.log." << rootNameToExecute << endl;
tempOutputFile << "#SBATCH --error=" << workDir << "/logs/error.log." << rootNameToExecute << endl;
tempOutputFile << "#SBATCH --time=" << params[MAXWALLTIME] << endl;
if (params.find(MAXRAMSIZE) != params.end())
tempOutputFile << "#SBATCH --mem=" << params[MAXRAMSIZE] << endl;
+ else if (params.find(MEMPERCPU) != params.end())
+ tempOutputFile << "#SBATCH --mem-per-cpu=" << params[MEMPERCPU] << endl;
if (params.find(QUEUE) != params.end())
- tempOutputFile << "#SBATCH --partition=" << params[QUEUE] << endl;
+ tempOutputFile << "#SBATCH --qos=" << params[QUEUE] << endl;
+ if (params.find(PARTITION) != params.end())
+ tempOutputFile << "#SBATCH --partition=" << params[PARTITION] << endl;
+ if (params.find(WCKEY) != params.end())
+ tempOutputFile << "#SBATCH --wckey=" << params[WCKEY] << endl;
+ if (params.find(NBNODE) != params.end())
+ tempOutputFile << "#SBATCH --nodes=" << params[NBNODE] << endl;
+ if (params.find(EXTRAPARAMS) != params.end())
+ tempOutputFile << params[EXTRAPARAMS] << endl;
// Define environment for the job
Environnement env = job.getEnvironnement();
}
// generate nodes file
- tempOutputFile << "LIBBATCH_NODEFILE=`mktemp nodefile-XXXXXXXXXX`" << endl;
- tempOutputFile << "srun hostname > $LIBBATCH_NODEFILE" << endl;
+ tempOutputFile << "LIBBATCH_NODEFILE=$(mktemp nodefile-XXXXXXXXXX)" << endl;
+ tempOutputFile << "srun hostname > \"$LIBBATCH_NODEFILE\"" << endl;
tempOutputFile << "export LIBBATCH_NODEFILE" << endl;
// Launch the executable
tempOutputFile << endl;
// Remove the node file
- tempOutputFile << "rm $LIBBATCH_NODEFILE" << endl;
+ tempOutputFile << "rm \"$LIBBATCH_NODEFILE\"" << endl;
tempOutputFile.flush();
tempOutputFile.close();
void BatchManager_Slurm::deleteJob(const JobId & jobid)
{
// define command to delete job
- string subCommand = "scancel " + jobid.getReference();
+ string subCommand = string("bash -l -c \\\"scancel ") + jobid.getReference() + "\\\"";
string command = _protocol.getExecCommand(subCommand, _hostname, _username);
LOG(command);
JobInfo BatchManager_Slurm::queryJob(const JobId & jobid)
{
- // define command to query batch
- string subCommand = "squeue -o %t -j " + jobid.getReference();
+ // First try to query the job with "squeue" command
+ string subCommand = string("bash -l -c \\\"squeue -h -o %T -j ") + jobid.getReference() + " 2>/dev/null" + "\\\"";
string command = _protocol.getExecCommand(subCommand, _hostname, _username);
LOG(command);
string output;
- Utils::getCommandOutput(command, output);
- // We don't test the return code here because with jobs finished since a long time Slurm
- // returns an error and a message like "slurm_load_jobs error: Invalid job id specified".
- // So we consider that the job is finished when we get an error.
+ int status = Utils::getCommandOutput(command, output);
+ LOG("status: " << status << ", output: " << output);
+ bool found = false;
+ JobInfo jobinfo;
+ if (status == 0) {
+ try {
+ jobinfo = JobInfo_Slurm(jobid.getReference(), output);
+ found = true;
+ } catch (const RunTimeException & exc) {
+ LOG(exc);
+ }
+ }
- JobInfo_Slurm jobinfo = JobInfo_Slurm(jobid.getReference(), output);
+ // If "squeue" failed, the job may be finished. In this case, try to query the job with
+ // "sacct".
+ if (! found) {
+ string subCommand = string("bash -l -c \\\"sacct -X -o State%-10 -n -j ") + jobid.getReference() + "\\\"";
+ string command = _protocol.getExecCommand(subCommand, _hostname, _username);
+ LOG(command);
+ string output;
+ int status = Utils::getCommandOutput(command, output);
+ LOG("status: " << status << ", output: " << output);
+ if (status == 0) {
+ try {
+ jobinfo = JobInfo_Slurm(jobid.getReference(), output);
+ } catch (const RunTimeException & exc) {
+ LOG(exc);
+ // rethrow the original exception (bare throw avoids copying it)
+ throw;
+ }
+ } else {
+ // "+ status" on a string literal is pointer arithmetic, not concatenation:
+ // convert the return code to text before building the message
+ throw RunTimeException("sacct command failed with return code: " + std::to_string(status));
+ }
+ }
return jobinfo;
}