Commit 69a7e567 authored by Andreas Marek's avatar Andreas Marek

CI: Update runscripts

parent c0904cb8
This diff is collapsed.
......@@ -689,6 +689,11 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
memory = set_requested_memory(matrix_size[na])
if (g != "with-gpu"):
gpuJob="no"
else:
gpuJob="yes"
# do the configure
if ( instr == "sse" or (instr == "avx" and g != "with-gpu")):
if ( instr == "sse"):
......@@ -697,33 +702,18 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
+ libs + " " + ldflags + " " + " "+ scalapackldflags +" " + scalapackfcflags \
+ " --enable-option-checking=fatal" + " " + mpi_configure_flag + " " + openmp[o] \
+ " " + precision[p] + " " + assumed_size[a] + " " + band_to_full_blocking[b] \
+ " " +gpu[g] + INSTRUCTION_OPTIONS + "\" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -i $INTERACTIVE_RUN -S $SLURM")
+ " " +gpu[g] + INSTRUCTION_OPTIONS + "\" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -i $INTERACTIVE_RUN -S $SLURM -g " +gpuJob)
if ( instr == "avx2" or instr == "avx512" or instr == "knl" or g == "with-gpu"):
print(" - export REQUESTED_MEMORY="+memory)
print("\n")
#if (g == "with-gpu"):
# print(" - echo \"The tasks will be submitted to SLURM PARTITION \" $SLURMPARTITION \" on host \" $SLURMHOST \" with constraints \" $CONTSTRAINTS \" with the geometry \" $GEOMETRYRESERVATION" )
#else:
# print(" - echo \"The tasks will be submitted to SLURM PARTITION \" $SLURMPARTITION \" on host \" $SLURMHOST \"with constraints \" $CONTSTRAINTS ")
## construct srun command-line
#if (g == "with-gpu"):
# print(" - export SRUN_COMMANDLINE_CONFIGURE=\"--partition=$SLURMPARTITION --nodelist=$SLURMHOST --time=$CONFIGURETIME --constraint=$CONTSTRAINTS --gres=$GEOMETRYRESERVATION \" ")
# print(" - export SRUN_COMMANDLINE_BUILD=\"--partition=$SLURMPARTITION --nodelist=$SLURMHOST --time=$BUILDTIME --constraint=$CONTSTRAINTS --gres=$GEOMETRYRESERVATION \" ")
# print(" - export SRUN_COMMANDLINE_RUN=\"--partition=$SLURMPARTITION --nodelist=$SLURMHOST --time=$RUNTIME --constraint=$CONTSTRAINTS --gres=$GEOMETRYRESERVATION \" ")
#else:
# print(" - export SRUN_COMMANDLINE_CONFIGURE=\"--partition=$SLURMPARTITION --nodelist=$SLURMHOST --time=$CONFIGURETIME --constraint=$CONTSTRAINTS --mem=$REQUESTED_MEMORY\" ")
# print(" - export SRUN_COMMANDLINE_BUILD=\"--partition=$SLURMPARTITION --nodelist=$SLURMHOST --time=$BUILDTIME --constraint=$CONTSTRAINTS --mem=$REQUESTED_MEMORY \" ")
# print(" - export SRUN_COMMANDLINE_RUN=\"--partition=$SLURMPARTITION --nodelist=$SLURMHOST --time=$RUNTIME --constraint=$CONTSTRAINTS --mem=$REQUESTED_MEMORY \" ")
##print(" - echo \"srun --ntasks=1 --cpus-per-task=1 $SRUN_COMMANDLINE_CONFIGURE\" ")
if (runScalapackTest):
print(" - ./ci_test_scripts/run_ci_tests.sh -c \" CC=\\\""+c_compiler_wrapper+"\\\"" + " CFLAGS=\\\""+CFLAGS+"\\\"" + " FC=\\\""+fortran_compiler_wrapper+"\\\"" + " FCFLAGS=\\\""+FCFLAGS+"\\\"" \
+ libs + " " + ldflags + " " + " "+ scalapackldflags +" " + scalapackfcflags \
+ " --enable-option-checking=fatal --enable-scalapack-tests" + " " + mpi_configure_flag + " " + openmp[o] \
+ " " + precision[p] + " " + assumed_size[a] + " " + band_to_full_blocking[b] \
+ " " +gpu[g] + INSTRUCTION_OPTIONS + "\" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \"srun\" -S $SLURM")
+ " " +gpu[g] + INSTRUCTION_OPTIONS + "\" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \"srun\" -S $SLURM -g " +gpuJob)
else:
......@@ -731,7 +721,7 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
+ libs + " " + ldflags + " " + " "+ scalapackldflags +" " + scalapackfcflags \
+ " --enable-option-checking=fatal" + " " + mpi_configure_flag + " " + openmp[o] \
+ " " + precision[p] + " " + assumed_size[a] + " " + band_to_full_blocking[b] \
+ " " +gpu[g] + INSTRUCTION_OPTIONS + "\" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \"srun\" -i $INTERACTIVE_RUN -S $SLURM")
+ " " +gpu[g] + INSTRUCTION_OPTIONS + "\" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \"srun\" -i $INTERACTIVE_RUN -S $SLURM -g " +gpuJob)
# do the test
......
......@@ -14,6 +14,7 @@ skipStep=0
batchCommand=""
interactiveRun="yes"
slurmBatch="no"
gpuJob="no"
function usage() {
cat >&2 <<-EOF
......@@ -21,7 +22,7 @@ function usage() {
Call all the necessary steps to perform an ELPA CI test
Usage:
run_ci_tests [-c configure arguments] [-j makeTasks] [-h] [-t MPI Tasks] [-m matrix size] [-n number of eigenvectors] [-b block size] [-o OpenMP threads] [-s skipStep] [-q submit command] [-i interactive run] [-S submit to Slurm]"
run_ci_tests [-c configure arguments] [-j makeTasks] [-h] [-t MPI Tasks] [-m matrix size] [-n number of eigenvectors] [-b block size] [-o OpenMP threads] [-s skipStep] [-q submit command] [-i interactive run] [-S submit to Slurm] [-g GPU job]"
Options:
-c configure arguments
......@@ -56,13 +57,15 @@ function usage() {
-S submit to slurm
if "yes" a SLURM batch job will be submitted
-g gpu job
if "yes" a GPU job is assumed
-h
Print this help text
EOF
}
while getopts "c:t:j:m:n:b:o:s:q:i:S:h" opt; do
while getopts "c:t:j:m:n:b:o:s:q:i:S:g:h" opt; do
case $opt in
j)
makeTasks=$OPTARG;;
......@@ -86,6 +89,8 @@ while getopts "c:t:j:m:n:b:o:s:q:i:S:h" opt; do
interactiveRun=$OPTARG;;
S)
slurmBatch=$OPTARG;;
g)
gpuJob=$OPTARG;;
:)
echo "Option -$OPTARG requires an argument" >&2;;
h)
......@@ -128,7 +133,7 @@ then
echo "Running on $CLUSTER with runner $CI_RUNNER_DESCRIPTION with tag $CI_RUNNER_TAGS on $mpiTasks tasks"
# GPU runners
if [ "$CI_RUNNER_TAGS" == "gpu" ]
if [ "$gpuJob" == "yes" ]
then
cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node_2GPU.sh .
echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node_2GPU.sh
......
......@@ -11,6 +11,7 @@ ompThreads=1
configueArg=""
batchCommand=""
slurmBatch="no"
gpuJob="no"
function usage() {
cat >&2 <<-EOF
......@@ -18,7 +19,7 @@ function usage() {
Call all the necessary steps to perform an ELPA CI test
Usage:
run_distcheck_tests [-c configure arguments] [-h] [-t MPI Tasks] [-m matrix size] [-n number of eigenvectors] [-b block size] [-o OpenMP threads] [-q submit command] [-S submit to Slurm]"
run_distcheck_tests [-c configure arguments] [-h] [-t MPI Tasks] [-m matrix size] [-n number of eigenvectors] [-b block size] [-o OpenMP threads] [-q submit command] [-S submit to Slurm] [-g GPU job]"
Options:
-c configure arguments
......@@ -44,13 +45,16 @@ function usage() {
-S submit to slurm
if "yes" a SLURM batch job will be submitted
-g gpu job
if "yes" a gpu job is assumed
-h
Print this help text
EOF
}
while getopts "c:t:j:m:n:b:o:s:q:i:S:h" opt; do
while getopts "c:t:j:m:n:b:o:s:q:i:S:g:h" opt; do
case $opt in
t)
mpiTasks=$OPTARG;;
......@@ -68,6 +72,8 @@ while getopts "c:t:j:m:n:b:o:s:q:i:S:h" opt; do
batchCommand=$OPTARG;;
S)
slurmBatch=$OPTARG;;
g)
gpuJob=$OPTARG;;
:)
echo "Option -$OPTARG requires an argument" >&2;;
h)
......@@ -114,6 +120,9 @@ then
echo "export DISTCHECK_CONFIGURE_FLAGS=\" $configureArgs \" " >> ./run_${CLUSTER}_1node.sh
echo "make distcheck TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" || { chmod u+rwX -R . ; exit 1 ; } " >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#copy everything back from /tmp/elpa to runner directory"
echo "cp -r * $runner_path"
echo " "
echo "Job script for the run"
cat ./run_${CLUSTER}_1node.sh
......
......@@ -14,6 +14,7 @@ slurmBatch="no"
projectName="unknown"
projectExecutable=""
projectConfigureArg=""
gpuJob="no"
function usage() {
cat >&2 <<-EOF
......@@ -21,7 +22,7 @@ function usage() {
Call all the necessary steps to perform an ELPA CI test
Usage:
run_project_tests [-c configure arguments] [-h] [-t MPI Tasks] [-m matrix size] [-n number of eigenvectors] [-b block size] [-o OpenMP threads] [-q submit command] [-S submit to Slurm] [-p projectName] [-e projectExecutable] [-C project configure arguments]"
run_project_tests [-c configure arguments] [-h] [-t MPI Tasks] [-m matrix size] [-n number of eigenvectors] [-b block size] [-o OpenMP threads] [-q submit command] [-S submit to Slurm] [-p projectName] [-e projectExecutable] [-C project configure arguments] [-g gpu job]"
Options:
-c configure arguments
......@@ -56,13 +57,16 @@ function usage() {
-C project configure arguments
arguments for the configure of the project
-g gpu job
if "yes" a gpu job is assumed
-h
Print this help text
EOF
}
while getopts "c:t:j:m:n:b:o:s:q:i:S:p:e:C:h" opt; do
while getopts "c:t:j:m:n:b:o:s:q:i:S:p:e:C:g:h" opt; do
case $opt in
t)
mpiTasks=$OPTARG;;
......@@ -86,6 +90,8 @@ while getopts "c:t:j:m:n:b:o:s:q:i:S:p:e:C:h" opt; do
projectExecutable=$OPTARG;;
C)
projectConfigureArgs=$OPTARG;;
g)
gpuJob=$OPTARG;;
:)
echo "Option -$OPTARG requires an argument" >&2;;
h)
......@@ -172,6 +178,8 @@ then
echo "rm -rf installdest" >> ./run_${CLUSTER}_1node.sh
echo "popd" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#copy everything back from /tmp/elpa to runner directory"
echo "cp -r * $runner_path"
echo " "
echo "Job script for the run"
cat ./run_${CLUSTER}_1node.sh
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment