Commit 0fe0213a authored by Marcel Henrik Schubert

complete scripts

parent 9f2ff30f
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/svm_comp_count_1000.out
#SBATCH -e ./out/svm_comp_count_1000.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J comp_count_1000
# Queue:
#SBATCH --partition=general
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# for OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/svm_count.py stratified_subsample bigram 4 1000 complete_balance
echo "job finished"
\ No newline at end of file
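For reference, a job script like the one above would typically be submitted and checked as sketched below; the filename is an assumption, since the diff does not show the scripts' file names.

# Hypothetical filename -- the commit diff does not show the actual script name.
sbatch svm_comp_count_1000.sh                   # submit the job to SLURM
squeue -u "$USER"                               # check queued/running state
sacct -j <jobid> --format=JobID,State,Elapsed   # inspect runtime after completion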
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/svm_comp_count_200.out
#SBATCH -e ./out/svm_comp_count_200.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J comp_200
# Queue:
#SBATCH --partition=short
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# for OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=04:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/svm_count.py stratified_subsample bigram 10 200 complete_balance
echo "job finished"
\ No newline at end of file
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/svm_comp_count_2000.out
#SBATCH -e ./out/svm_comp_count_2000.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J comp_count_2000
# Queue:
#SBATCH --partition=general
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# for OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/svm_count.py stratified_subsample bigram 3 2000 complete_balance
\ No newline at end of file
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/svm_comp_count_500.out
#SBATCH -e ./out/svm_comp_count_500.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J comp_count_500
# Queue:
#SBATCH --partition=general
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# for OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/svm_count.py stratified_subsample bigram 4 500 complete_balance
echo "job finished"
\ No newline at end of file
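The four scripts above differ only in job name, output/error paths, partition, walltime, and the two numeric arguments passed to svm_count.py. A minimal sketch of a single parameterized template that could replace them, assuming it is saved as svm_count_template.sh (the filename and the consolidation itself are assumptions, not part of this commit):

#!/bin/bash -l
# Hypothetical template: the per-job settings are supplied on the sbatch
# command line, which overrides or supplements the in-script #SBATCH directives.
# Example submission (reproducing the 500-author job above):
#   sbatch -J comp_count_500 -o ./out/svm_comp_count_500.out \
#          -e ./out/svm_comp_count_500.err --partition=general \
#          --time=24:00:00 svm_count_template.sh 4 500
#SBATCH -D ./
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-core=2
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread

module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1

# $1 and $2 stand in for the two numeric positional arguments that vary across
# the four scripts (4 1000, 10 200, 3 2000, 4 500); the remaining arguments are
# identical in every script.
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/svm_count.py \
    stratified_subsample bigram "$1" "$2" complete_balance
echo "job finished"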