diff --git a/slurm/README.md b/slurm/README.md
new file mode 100644
index 0000000..eb55d85
--- /dev/null
+++ b/slurm/README.md
@@ -0,0 +1,13 @@
+## Slurm instructions
+
+The `srun.sh` script is a standalone script to run a single Slurm job in the current working directory. This should usually be run in its own directory as it will generate a number of outfiles, as well as data and job id files.
+
+It can be run, once the desired Slurm and Python parameters for the job are set in `srun.sh`, using:
+
+    sbatch srun.sh
+
+The `sbatch.sh` file is used to batch create a number of Slurm jobs, and will generate its own measurement subdirectories for each parameter set. The example files will run 32 measurements on 32 cores for each job, and a job will be submitted for 6 stick densities, from 4 to 20 per µm.
+
+Slurm parameters are set in the `srun_header.sh` file, while parameters for the Python job are set in `sbatch.sh`. These must be checked to ensure they match, and for that reason could be integrated into a single script at some point. Usage is:
+
+    bash sbatch.sh
diff --git a/slurm/sbatch.sh b/slurm/sbatch.sh
new file mode 100644
index 0000000..df00562
--- /dev/null
+++ b/slurm/sbatch.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# this dictates the number of measurements to be made, and for what densities
+for density in 4 9 10 11 14 20
+do
+    n=$(echo $density*3600 | bc)
+    echo $n
+    mkdir meas_32x$density
+    cp srun_header.sh meas_32x$density/srun.sh
+    echo 'python3 ~/gitrepos/networksim-cntfet/measure_perc.py -s --cores 32 --start '$n' --step 0 --number 32 --scaling 60'>>meas_32x$density/srun.sh
+    cd meas_32x$density/
+    output=$(sbatch srun.sh)
+    echo $output
+    touch 'jobid_'${output:20}
+    cd ..
+done
diff --git a/srun.sh b/slurm/srun.sh
similarity index 89%
rename from srun.sh
rename to slurm/srun.sh
index 03b4406..53c8e30 100644
--- a/srun.sh
+++ b/slurm/srun.sh
@@ -4,6 +4,6 @@
 #SBATCH -o outfile # send stdout to outfile
 #SBATCH -e errfile # send stderr to errfile
 #SBATCH -t 10:00:00 # time requested in hour:minute:second
-#SBATCH --mem-per-cpu=1024
+#SBATCH --mem-per-cpu=1024 #memory per task in MB
 #SBATCH --mail-user=$USER@localhost
 python3 ~/gitrepos/networksim-cntfet/measure_perc.py -s --cores 32 --start 36000 --step 0 --number 32 --scaling 60
diff --git a/slurm/srun_header.sh b/slurm/srun_header.sh
new file mode 100644
index 0000000..1d7dbf4
--- /dev/null
+++ b/slurm/srun_header.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+#SBATCH -N 1 # nodes requested
+#SBATCH -n 32 # tasks requested. default is one core per task
+#SBATCH -o outfile # send stdout to outfile
+#SBATCH -e errfile # send stderr to errfile
+#SBATCH --mem-per-cpu=1024
+#SBATCH -t 24:00:00 # time requested in hour:minute:second
+#SBATCH --mail-user=$USER@localhost