Commit 532f10bf authored by Andreas Marek's avatar Andreas Marek
Browse files

Update CI test infrastructure

parent 81c1bf30
......@@ -136,12 +136,29 @@ then
if [ "$gpuJob" == "yes" ]
then
  # Build the 1-node / 2-GPU batch script: copy the cluster template, then
  # append the configure/build/test commands that run inside the SLURM job.
  cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node_2GPU.sh .
  # Only MPI rank 0 configures and builds; the generated "if/fi" guards that.
  # FIX: this first line was previously appended to run_${CLUSTER}_1node_GPU.sh
  # (wrong file, missing "2"), which left the 2GPU script with a dangling
  # "then" and made it fail with a syntax error at job runtime.
  echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "then" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node_2GPU.sh
  # Save then unset the SLURM/Intel-MPI coupling variables so the configure/
  # build step is not influenced by the surrounding SLURM allocation.
  echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "make -j 16" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "touch build_done" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "fi" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "export OMP_NUM_THREADS=$ompThreads" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node_2GPU.sh
  # NOTE(review): the spin-wait on build_done below is disabled, so non-zero
  # ranks may reach "make check" before rank 0 finishes building — confirm
  # this is intentional.
  #echo "while ! [ -f ./build_done ];" >> ./run_${CLUSTER}_1node_2GPU.sh
  #echo "do" >> ./run_${CLUSTER}_1node_2GPU.sh
  #echo "echo \" \" > /dev/null" >> ./run_${CLUSTER}_1node_2GPU.sh
  #echo "done" >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "make check TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" " >> ./run_${CLUSTER}_1node_2GPU.sh
  echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
  echo "exitCode=\$?" >> ./run_${CLUSTER}_1node_2GPU.sh
......@@ -174,12 +191,30 @@ then
# FIX: the last clause was "[ [\"$CI_RUNNER_TAGS\" =~ \"avx512\" ]]" — a lone
# "[" test command with broken arguments instead of a "[[ ]]" regex match.
if [[ "$CI_RUNNER_TAGS" =~ "sse" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx2" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx512" ]]
then
  # Build the 1-node CPU batch script: copy the cluster template, then append
  # the configure/build/test commands that run inside the SLURM job.
  cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node.sh .
  echo " " >> ./run_${CLUSTER}_1node.sh
  # Only MPI rank 0 configures and builds; the generated "if/fi" guards that.
  echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
  echo "then" >> ./run_${CLUSTER}_1node.sh
  echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
  # FIX: this line was 'echo "\#decouple ... \(...\)"', which emitted
  # "\#decouple ..." into the job script; there "\#" un-escapes to a plain
  # "#" word, so the job tried to EXECUTE a command named "#decouple" instead
  # of reading a comment. Now emits a real comment, matching the other hunks.
  echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
  # Save then unset the SLURM/Intel-MPI coupling variables so the configure/
  # build step is not influenced by the surrounding SLURM allocation.
  echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
  echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
  echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
  echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
  echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
  echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
  echo " " >> ./run_${CLUSTER}_1node.sh
  echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node.sh
  echo " " >> ./run_${CLUSTER}_1node.sh
  echo "make -j 16 " >> ./run_${CLUSTER}_1node.sh
  echo "touch build_done" >> ./run_${CLUSTER}_1node.sh
  echo "fi" >> ./run_${CLUSTER}_1node.sh
  echo " " >> ./run_${CLUSTER}_1node.sh
  echo "export OMP_NUM_THREADS=$ompThreads" >> ./run_${CLUSTER}_1node.sh
  echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
  # NOTE(review): the spin-wait on build_done below is disabled, so non-zero
  # ranks may reach "make check" before rank 0 finishes building — confirm
  # this is intentional.
  #echo "while ! [ -f ./build_done ];" >> ./run_${CLUSTER}_1node.sh
  #echo "do" >> ./run_${CLUSTER}_1node.sh
  #echo "echo \" \" > /dev/null" >> ./run_${CLUSTER}_1node.sh
  #echo "done" >> ./run_${CLUSTER}_1node.sh
  echo "make check TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" " >> ./run_${CLUSTER}_1node.sh
  echo " " >> ./run_${CLUSTER}_1node.sh
  echo "exitCode=\$?" >> ./run_${CLUSTER}_1node.sh
......
......@@ -121,7 +121,20 @@ then
# Build the 1-node "distcheck" batch script: copy the cluster template, then
# append the commands that run inside the SLURM job.
if [[ "$CI_RUNNER_TAGS" =~ "distcheck" ]]
then
cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node.sh .
echo " " >> ./run_${CLUSTER}_1node.sh
# Only MPI rank 0 runs configure; the generated "if/fi" guards that.
echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
echo "then" >> ./run_${CLUSTER}_1node.sh
echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
# Save then unset the SLURM/Intel-MPI coupling variables so configure is not
# influenced by the surrounding SLURM allocation.
echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
# On configure failure, dump config.log for CI diagnostics before exiting.
echo "./configure " "$configureArgs" " || { cat config.log; exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo "fi" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
# Flags consumed by "make distcheck" when it re-configures the dist tree.
echo "export DISTCHECK_CONFIGURE_FLAGS=\" $distcheckConfigureArgs \" " >> ./run_${CLUSTER}_1node.sh
......
......@@ -135,16 +135,29 @@ then
# Out-of-source build: the generated job creates and enters ./build first.
echo "mkdir -p build" >> ./run_${CLUSTER}_1node.sh
echo "pushd build" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
# Only MPI rank 0 runs autogen/configure/make; the generated "if/fi" guards that.
echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
echo "then" >> ./run_${CLUSTER}_1node.sh
echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
# Save then unset the SLURM/Intel-MPI coupling variables so the build step is
# not influenced by the surrounding SLURM allocation.
echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running autogen " >> ./run_${CLUSTER}_1node.sh
echo "../autogen.sh" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running configure " >> ./run_${CLUSTER}_1node.sh
# On configure failure, dump config.log for CI diagnostics before exiting.
echo "../configure " "$configureArgs" " || { cat config.log; exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo "#Running make " >> ./run_${CLUSTER}_1node.sh
echo "make -j 8 || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo "touch build_done" >> ./run_${CLUSTER}_1node.sh
echo "fi" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
# NOTE(review): "make -j 8" is emitted a second time here, OUTSIDE the rank-0
# guard, so every SLURM rank runs it concurrently in the same build dir.
# Presumably a re-run is a cheap no-op after rank 0 built — confirm intended.
echo "#Running make " >> ./run_${CLUSTER}_1node.sh
echo "make -j 8 || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running make install" >> ./run_${CLUSTER}_1node.sh
echo "make install || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment