-
Notifications
You must be signed in to change notification settings - Fork 10
/
slurm_ndvi.sh
54 lines (45 loc) · 1.14 KB
/
slurm_ndvi.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
#!/bin/bash
# SLURM batch wrapper for NDVI/GDAL processing.
# Logs job metadata, activates the 'pgc' conda environment, then runs the
# command passed in via the exported variable p1, e.g.:
#   sbatch --export=p1="gdal_calc.py ..." slurm_ndvi.sh
# number of nodes
#SBATCH -N 1
# number of cpus per task
#SBATCH -c 1
# memory
#SBATCH --mem 12G
# wallclock
#SBATCH -t 1:00:00
# job log path (%x = job name, %j = job id; stderr merged into stdout log)
#SBATCH -o %x.o%j
#SBATCH -e %x.o%j

echo ________________________________________
echo
echo "SLURM Job Log"
echo "Start time: $(date)"
echo
echo "Job name: $SLURM_JOB_NAME"
echo "Job ID: $SLURM_JOBID"
echo "Submitted by user: $USER"
echo "User effective group ID: $(id -ng)"
echo
echo "SLURM account used: $SLURM_ACCOUNT"
echo "Hostname of submission: $SLURM_SUBMIT_HOST"
echo "Submitted to cluster: $SLURM_CLUSTER_NAME"
echo "Submitted to node: $SLURMD_NODENAME"
echo "Cores on node: $SLURM_CPUS_ON_NODE"
echo "Requested cores per task: $SLURM_CPUS_PER_TASK"
echo "Requested cores per job: $SLURM_NTASKS"
echo "Requested walltime: $SBATCH_TIMELIMIT"
echo "Nodes assigned to job: $SLURM_JOB_NODELIST"
echo "Running node index: $SLURM_NODEID"
echo
echo "Running on hostname: $HOSTNAME"
echo "Parent PID: $PPID"
echo "Process PID: $$"
echo
echo "Working directory: $SLURM_SUBMIT_DIR"
echo ________________________________________________________
echo

# Run from the submission directory; abort rather than eval in the wrong cwd.
cd "$SLURM_SUBMIT_DIR" || { echo "ERROR: cannot cd to $SLURM_SUBMIT_DIR" >&2; exit 1; }

# init gdal tools
source ~/.bashrc
conda activate pgc

# Fail loudly if no command was supplied instead of silently doing nothing.
: "${p1:?p1 must be set to the command to run (sbatch --export=p1=...)}"

# Log the command verbatim (printf avoids word-splitting/glob expansion
# that unquoted 'echo $p1' would apply), then execute it under 'time'.
# NOTE(review): eval of an env-supplied string is by design here (trusted
# submitter), but is shell injection if p1 ever comes from untrusted input.
printf '%s\n' "$p1"
time eval "$p1"