diff --git a/CHANGELOG.md b/CHANGELOG.md
index b7776f6..2b91983 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## [0.5.2](https://github.com/snakemake/snakemake-executor-plugin-slurm/compare/v0.5.1...v0.5.2) (2024-06-04)
+
+
+### Bug Fixes
+
+* [#97](https://github.com/snakemake/snakemake-executor-plugin-slurm/issues/97) preventing node confinement ([#98](https://github.com/snakemake/snakemake-executor-plugin-slurm/issues/98)) ([fa7877f](https://github.com/snakemake/snakemake-executor-plugin-slurm/commit/fa7877f8d086883ce74db75c3246b8c050720a62))
+
 ## [0.5.1](https://github.com/snakemake/snakemake-executor-plugin-slurm/compare/v0.5.0...v0.5.1) (2024-05-14)
 
 
diff --git a/docs/further.md b/docs/further.md
index 48aa352..be2eb49 100644
--- a/docs/further.md
+++ b/docs/further.md
@@ -2,7 +2,7 @@
 
 ## The general Idea
 
-To use this plugin, log in to your cluster's head node (sometimes called the "login" node), activate your environment as usual and start Snakemake. Snakemake will then submit your jobs as cluster jobs.
+To use this plugin, log in to your cluster's head node (sometimes called the "login" node), activate your environment as usual, and start Snakemake. Snakemake will then submit your jobs as cluster jobs.
 
 ## Specifying Account and Partition
 
@@ -86,6 +86,8 @@ other systems, e.g. by replacing `srun` with `mpiexec`:
 $ snakemake --set-resources calc_pi:mpi="mpiexec" ...
 ```
 
+For "ordinary" MPI jobs, specifying `tasks` (the number of MPI ranks) is sufficient. Alternatively, on some clusters, it might be convenient to just configure `nodes`. Consider using a combination of `tasks` and `cpus_per_task` for hybrid applications, i.e. those that combine MPI ranks (processes) with threads. A detailed topology layout can be achieved with further flags such as `--distribution`, passed via the `slurm_extra` parameter (see below).
+
 ## Running Jobs locally
 
 Not all Snakemake workflows are adapted for heterogeneous environments, particularly clusters. Users might want to avoid the submission of _all_ rules as cluster jobs. Non-cluster jobs should usually include _short_ jobs, e.g. internet downloads or plotting rules.
@@ -158,8 +160,7 @@ set-resources:
 ## Additional Custom Job Configuration
 
 SLURM installations can support custom plugins, which may add support
-for additional flags to `sbatch`. In addition, there are various
-`sbatch` options not directly supported via the resource definitions
+for additional flags to `sbatch`. In addition, there are various batch options not directly supported via the resource definitions
 shown above. You may use the `slurm_extra` resource to specify
 additional flags to `sbatch`:
 
@@ -210,7 +211,7 @@ shared-fs-usage:
 local-storage-prefix: ""
 ```
 
-It will set the executor to be this SLURM executor, ensure sufficient file system latency and allow automatic stage-in of files using the [file system storage plugin](https://github.com/snakemake/snakemake-storage-plugin-fs).
+It will set the executor to be this SLURM executor, ensure sufficient file system latency, and allow automatic stage-in of files using the [file system storage plugin](https://github.com/snakemake/snakemake-storage-plugin-fs).
 
 Note, that you need to set the `SNAKEMAKE_PROFILE` environment variable in your `~/.bashrc` file, e.g.:
 
diff --git a/pyproject.toml b/pyproject.toml
index ab18d20..6f7e594 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "snakemake-executor-plugin-slurm"
-version = "0.5.1"
+version = "0.5.2"
 description = "A Snakemake executor plugin for submitting jobs to a SLURM cluster."
 authors = [
     "Christian Meesters ",
diff --git a/snakemake_executor_plugin_slurm/__init__.py b/snakemake_executor_plugin_slurm/__init__.py
index 14f466a..2fd7bb6 100644
--- a/snakemake_executor_plugin_slurm/__init__.py
+++ b/snakemake_executor_plugin_slurm/__init__.py
@@ -124,14 +124,23 @@ def run_job(self, job: JobExecutorInterface):
                 "- submitting without. This might or might not work on your cluster."
             )
 
-        # MPI job
-        if job.resources.get("mpi", False):
-            if job.resources.get("nodes", False):
-                call += f" --nodes={job.resources.get('nodes', 1)}"
+        if job.resources.get("nodes", False):
+            call += f" --nodes={job.resources.get('nodes', 1)}"
 
         # fixes #40 - set ntasks regarlless of mpi, because
         # SLURM v22.05 will require it for all jobs
         call += f" --ntasks={job.resources.get('tasks', 1)}"
 
+        # MPI job
+        if job.resources.get("mpi", False):
+            if not job.resources.get("tasks_per_node") and not job.resources.get(
+                "nodes"
+            ):
+                self.logger.warning(
+                    "MPI job detected, but no 'tasks_per_node' or 'nodes' "
+                    "specified. Assuming 'tasks_per_node=1'. "
+                    "Probably not what you want."
+                )
+        call += f" --cpus-per-task={get_cpus_per_task(job)}"
 
         if job.resources.get("slurm_extra"):
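Below is a rough illustration of the MPI guidance added to `docs/further.md` above, written as a Snakemake rule in the style of the documentation's existing `calc_pi` example. The rank and thread counts, the `nodes` value, the `calc-pi-mpi` command, and the `--distribution` setting are illustrative assumptions, not values taken from the diff:

```python
# Hypothetical hybrid MPI/OpenMP rule: 8 MPI ranks with 4 threads each on 2 nodes.
#   tasks         -> sbatch --ntasks (the MPI ranks)
#   cpus_per_task -> sbatch --cpus-per-task (threads per rank)
#   nodes         -> sbatch --nodes (also avoids the new MPI warning introduced above)
#   slurm_extra   -> further sbatch flags, e.g. a topology layout via --distribution
rule calc_pi:
    output:
        "pi.calc",
    resources:
        mpi="srun",
        tasks=8,
        cpus_per_task=4,
        nodes=2,
        slurm_extra="'--distribution=block:cyclic'",
    shell:
        "{resources.mpi} -n {resources.tasks} calc-pi-mpi > {output}"
```

For a plain (non-hybrid) MPI job, `tasks` alone is sufficient, as the new documentation paragraph states.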
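To make the `__init__.py` change concrete: `--nodes` and `--ntasks` are now appended for every job that defines the corresponding resources (the removed lines show that `--nodes` was previously only set inside the MPI branch), and an MPI job that specifies neither `tasks_per_node` nor `nodes` now triggers a warning. The following is a minimal, self-contained sketch of that flag assembly, not the plugin's actual code; `resources` is a plain dict standing in for `job.resources`, and the fallback of 1 for `cpus_per_task` is an assumption standing in for `get_cpus_per_task`:

```python
import warnings


def build_submit_flags(resources: dict) -> str:
    """Sketch of the sbatch flag assembly affected by this patch."""
    call = ""
    # --nodes is applied to any job that requests nodes, MPI or not.
    if resources.get("nodes"):
        call += f" --nodes={resources['nodes']}"
    # --ntasks is always set; SLURM >= 22.05 expects it for all jobs.
    call += f" --ntasks={resources.get('tasks', 1)}"
    # MPI jobs without an explicit layout get a warning, mirroring the patch.
    if resources.get("mpi") and not (
        resources.get("tasks_per_node") or resources.get("nodes")
    ):
        warnings.warn(
            "MPI job detected, but no 'tasks_per_node' or 'nodes' specified. "
            "Assuming 'tasks_per_node=1'. Probably not what you want."
        )
    call += f" --cpus-per-task={resources.get('cpus_per_task', 1)}"
    return call


print(build_submit_flags({"mpi": "srun", "tasks": 8, "cpus_per_task": 4, "nodes": 2}))
# prints: " --nodes=2 --ntasks=8 --cpus-per-task=4"
```

The example call corresponds to the hybrid rule sketched above and produces no warning because `nodes` is set.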