elpa / elpa · Commits

Commit 532f10bf, authored May 14, 2021 by Andreas Marek
Update CI test infrastructure
Parent: 81c1bf30
Changes: 3 files · Pipelines: 1
ci_test_scripts/run_ci_tests.sh
@@ -136,12 +136,29 @@ then
  if [ "$gpuJob" == "yes" ]
  then
    cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node_2GPU.sh .
    echo "if \[ \$SLURM_PROCID -eq 0 \]" >> ./run_${CLUSTER}_1node_GPU.sh
    echo "then" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "make -j 16" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "touch build_done" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "fi" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export OMP_NUM_THREADS=$ompThreads" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node_2GPU.sh
    #echo "while ! \[ -f ./build_done \];" >> ./run_${CLUSTER}_1node_2GPU.sh
    #echo "do" >> ./run_${CLUSTER}_1node_2GPU.sh
    #echo "echo \" \" > /dev/null" >> ./run_${CLUSTER}_1node_2GPU.sh
    #echo "done" >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "make check TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" " >> ./run_${CLUSTER}_1node_2GPU.sh
    echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
    echo "exitCode=\$?" >> ./run_${CLUSTER}_1node_2GPU.sh
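Reading aid, not part of the commit: the _save_ copies above stash the SLURM/Intel MPI launcher settings before they are unset for the rank-0 configure/build step. Assuming the job-script template (which is not shown in this diff) restores them before the MPI-parallel test run, the counterpart would look roughly like this sketch:

    # hypothetical restore step; not contained in this diff
    export SLURM_MPI_TYPE=$_save_SLURM_MPI_TYPE
    export I_MPI_SLURM_EXT=$_save_I_MPI_SLURM_EXT
    export I_MPI_PMI_LIBRARY=$_save_I_MPI_PMI_LIBRARY
    export I_MPI_PMI2=$_save_I_MPI_PMI2
    export I_MPI_HYDRA_BOOTSTRAP=$_save_I_MPI_HYDRA_BOOTSTRAP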
@@ -174,12 +191,30 @@ then
  if [[ "$CI_RUNNER_TAGS" =~ "sse" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx2" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx512" ]]
  then
    cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node.sh .
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "if \[ \$SLURM_PROCID -eq 0 \]" >> ./run_${CLUSTER}_1node.sh
    echo "then" >> ./run_${CLUSTER}_1node.sh
    echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
    echo "\#decouple from SLURM \(maybe this could be removed\)" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
    echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "make -j 16 " >> ./run_${CLUSTER}_1node.sh
    echo "touch build_done" >> ./run_${CLUSTER}_1node.sh
    echo "fi" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "export OMP_NUM_THREADS=$ompThreads" >> ./run_${CLUSTER}_1node.sh
    echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
    #echo "while ! \[ -f ./build_done \];" >> ./run_${CLUSTER}_1node.sh
    #echo "do" >> ./run_${CLUSTER}_1node.sh
    #echo "echo \" \" > /dev/null" >> ./run_${CLUSTER}_1node.sh
    #echo "done" >> ./run_${CLUSTER}_1node.sh
    echo "make check TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" " >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "exitCode=\$?" >> ./run_${CLUSTER}_1node.sh
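For orientation only (not part of the diff): after the echo lines above have run, the copied run_${CLUSTER}_1node.sh template ends with roughly the block below, with $configureArgs, $ompThreads, $mpiTasks, $matrixSize, $nrEV and $blockSize already expanded to their values. Blank separator lines and the save/unset block are elided here; the \[ \] backslashes from the echo strings are carried through literally but are harmless when the generated script runs. Angle-bracket placeholders stand for the expanded values.

    if \[ $SLURM_PROCID -eq 0 \]
    then
    echo "process $SLURM_PROCID running configure"
    # ... save and unset of the SLURM/Intel MPI variables ...
    ./configure <expanded $configureArgs>
    make -j 16
    touch build_done
    fi
    export OMP_NUM_THREADS=<value of $ompThreads>
    export TASKS=<value of $mpiTasks>
    make check TEST_FLAGS=" <matrixSize> <nrEV> <blockSize> "
    exitCode=$?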
ci_test_scripts/run_distcheck_tests.sh
@@ -121,7 +121,20 @@ then
  if [[ "$CI_RUNNER_TAGS" =~ "distcheck" ]]
  then
    cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node.sh .
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
    echo "then" >> ./run_${CLUSTER}_1node.sh
    echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
    echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
    echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "./configure " "$configureArgs" " || { cat config.log; exit 1; }" >> ./run_${CLUSTER}_1node.sh
    echo "fi" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
    echo "export DISTCHECK_CONFIGURE_FLAGS=\" $distcheckConfigureArgs \" " >> ./run_${CLUSTER}_1node.sh
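DISTCHECK_CONFIGURE_FLAGS is the standard Automake hook for passing configure arguments to the nested configure run inside make distcheck. The distcheck invocation itself lies outside this hunk; purely as an illustration, the generated job script would consume the exported variable along these lines:

    # illustrative sketch only; the actual distcheck call is not part of this hunk
    make distcheck DISTCHECK_CONFIGURE_FLAGS="$DISTCHECK_CONFIGURE_FLAGS" || { exit 1; }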
ci_test_scripts/run_project_tests.sh
@@ -135,16 +135,29 @@ then
    echo "mkdir -p build" >> ./run_${CLUSTER}_1node.sh
    echo "pushd build" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
    echo "then" >> ./run_${CLUSTER}_1node.sh
    echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
    echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
    echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
    echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "#Running autogen " >> ./run_${CLUSTER}_1node.sh
    echo "../autogen.sh" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "#Running configure " >> ./run_${CLUSTER}_1node.sh
    echo "../configure " "$configureArgs" " || { cat config.log; exit 1; }" >> ./run_${CLUSTER}_1node.sh
    echo "#Running make " >> ./run_${CLUSTER}_1node.sh
    echo "make -j 8 || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
    echo "touch build_done" >> ./run_${CLUSTER}_1node.sh
    echo "fi" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "#Running make " >> ./run_${CLUSTER}_1node.sh
    echo "make -j 8 || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
    echo " " >> ./run_${CLUSTER}_1node.sh
    echo "#Running make install" >> ./run_${CLUSTER}_1node.sh
    echo "make install || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
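As in the other scripts, only SLURM rank 0 runs autogen, configure and the initial build, and touch build_done marks completion. The commented-out lines in run_ci_tests.sh hint at how the remaining ranks could wait for that marker before the later make steps; a hedged sketch of such a guard (not part of this commit) would be:

    # hypothetical wait for the rank-0 build marker, mirroring the commented-out lines above
    while ! [ -f ./build_done ]
    do
      sleep 1    # the commented-out variant spins with: echo " " > /dev/null
    done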