Commit 32545daf authored by Marcel Henrik Schubert

added log mT

parent 90689bab
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/log_mT_1000.out
#SBATCH -e ./out/log_mT_1000.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J log_mT_1000
# Queue:
#SBATCH --partition=general
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# For OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
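# Positional arguments to logistic.py (assumed from context; they are not documented in this commit):
# sampling scheme, feature type, a numeric parameter, subsample size (1000, matching the job name), dataset variant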
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/logistic.py stratified_subsample bigram 4 1000 min_tweet_1000
echo "job finished"
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/log_mT_200.out
#SBATCH -e ./out/log_mT_200.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J log_mT_200
# Queue:
#SBATCH --partition=general
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# For OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
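# Same pipeline as log_mT_1000; only the assumed subsample size differs (200)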
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/logistic.py stratified_subsample bigram 4 200 min_tweet_1000
echo "job finished"
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/log_mT_2000.out
#SBATCH -e ./out/log_mT_2000.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J log_mT_2000
# Queue:
#SBATCH --partition=broadwell
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# For OpenMP:
#SBATCH --cpus-per-task=80
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
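# Same pipeline as log_mT_1000, but with the numeric parameter 6 and an assumed
# subsample size of 2000; this variant runs on the 80-thread broadwell partition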
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/logistic.py stratified_subsample bigram 6 2000 min_tweet_1000
echo "job finished"
#!/bin/bash -l
# Standard output and error:
# #SBATCH --open-mode=truncate
#SBATCH -o ./out/log_500.out
#SBATCH -e ./out/log_500.err
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J log_500
# Queue:
#SBATCH --partition=general
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# For OpenMP:
#SBATCH --cpus-per-task=64
#SBATCH --mail-type=none
#SBATCH --mail-user=schubert@coll.mpg.de
# Wall clock limit:
#SBATCH --time=24:00:00
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
export SLURM_HINT=multithread
module load gcc/8
module load anaconda/3/5.1
module load scikit-learn/0.19.1
# Run the program:
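# Same pipeline with an assumed subsample size of 500, run against the 'org'
# dataset variant instead of min_tweet_1000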
srun python /draco/u/mschuber/PAN/attributionfeatures/Scripts/logistic.py stratified_subsample bigram 4 500 org
echo "job finished"