Commit d780eff4 authored by Andreas Marek's avatar Andreas Marek
Browse files

Merge branch 'configure_test' into 'master_pre_stage'

Configure test

See merge request !69
parents 82ad3340 3cd9cef3
......@@ -21,6 +21,7 @@ issues
_not_ rely anymore that the user does check prior to calling ELPA whether the BLACSGRID is ok) if this check fails
then ELPA returns with an error
- limit number of OpenMP threads to one, if MPI thread level is not at least MPI_THREAD_SERIALIZED
- allow checking of the supported threading level of the MPI library at build time
Changelog for ELPA 2020.11.001
......
......@@ -136,12 +136,29 @@ then
if [ "$gpuJob" == "yes" ]
then
cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node_2GPU.sh .
echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "then" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node_2GPU.sh
echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node_2GPU.sh
echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
echo "make -j 16" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "touch build_done" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "fi" >> ./run_${CLUSTER}_1node_2GPU.sh
echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export OMP_NUM_THREADS=$ompThreads" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node_2GPU.sh
#echo "while ! \[ -f ./build_done \];" >> ./run_${CLUSTER}_1node_2GPU.sh
#echo "do" >> ./run_${CLUSTER}_1node_2GPU.sh
#echo "echo \""\ > /dev/null" >> ./run_${CLUSTER}_1node_2GPU.sh
#echo "done" >> ./run_${CLUSTER}_1node_2GPU.sh
echo "make check TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" " >> ./run_${CLUSTER}_1node_2GPU.sh
echo " " >> ./run_${CLUSTER}_1node_2GPU.sh
echo "exitCode=\$?" >> ./run_${CLUSTER}_1node_2GPU.sh
......@@ -174,12 +191,30 @@ then
if [[ "$CI_RUNNER_TAGS" =~ "sse" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx2" ]] || [[ "$CI_RUNNER_TAGS" =~ "avx512" ]]
then
cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node.sh .
echo " " >> ./run_${CLUSTER}_1node.sh
echo "if \[ \$SLURM_PROCID -eq 0 \]" >> ./run_${CLUSTER}_1node.sh
echo "then" >> ./run_${CLUSTER}_1node.sh
echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "./configure " "$configureArgs" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "make -j 16 " >> ./run_${CLUSTER}_1node.sh
echo "touch build_done" >> ./run_${CLUSTER}_1node.sh
echo "fi" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "export OMP_NUM_THREADS=$ompThreads" >> ./run_${CLUSTER}_1node.sh
echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
echo "while ! [ -f ./build_done ];" >> ./run_${CLUSTER}_1node.sh
echo "do" >> ./run_${CLUSTER}_1node.sh
echo "echo \" \" > /dev/null" >> ./run_${CLUSTER}_1node.sh
echo "done" >> ./run_${CLUSTER}_1node.sh
echo "make check TEST_FLAGS=\" $matrixSize $nrEV $blockSize \" " >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "exitCode=\$?" >> ./run_${CLUSTER}_1node.sh
......
......@@ -121,7 +121,20 @@ then
if [[ "$CI_RUNNER_TAGS" =~ "distcheck" ]]
then
cp $HOME/runners/job_script_templates/run_${CLUSTER}_1node.sh .
echo " " >> ./run_${CLUSTER}_1node.sh
echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
echo "then" >> ./run_${CLUSTER}_1node.sh
echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "./configure " "$configureArgs" " || { cat config.log; exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo "fi" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
echo "export DISTCHECK_CONFIGURE_FLAGS=\" $distcheckConfigureArgs \" " >> ./run_${CLUSTER}_1node.sh
......
......@@ -135,16 +135,29 @@ then
echo "mkdir -p build" >> ./run_${CLUSTER}_1node.sh
echo "pushd build" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "if [ \$SLURM_PROCID -eq 0 ]" >> ./run_${CLUSTER}_1node.sh
echo "then" >> ./run_${CLUSTER}_1node.sh
echo "echo \"process \$SLURM_PROCID running configure\"" >> ./run_${CLUSTER}_1node.sh
echo "#decouple from SLURM (maybe this could be removed)" >> ./run_${CLUSTER}_1node.sh
echo "export _save_SLURM_MPI_TYPE=\$SLURM_MPI_TYPE" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_SLURM_EXT=\$I_MPI_SLURM_EXT" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI_LIBRARY=\$I_MPI_PMI_LIBRARY" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_PMI2=\$I_MPI_PMI2" >> ./run_${CLUSTER}_1node.sh
echo "export _save_I_MPI_HYDRA_BOOTSTRAP=\$I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo "unset SLURM_MPI_TYPE I_MPI_SLURM_EXT I_MPI_PMI_LIBRARY I_MPI_PMI2 I_MPI_HYDRA_BOOTSTRAP" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running autogen " >> ./run_${CLUSTER}_1node.sh
echo "../autogen.sh" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running configure " >> ./run_${CLUSTER}_1node.sh
echo "../configure " "$configureArgs" " || { cat config.log; exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo "#Running make " >> ./run_${CLUSTER}_1node.sh
echo "make -j 8 || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo "touch build_done" >> ./run_${CLUSTER}_1node.sh
echo "fi" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "export TASKS=$mpiTasks" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running make " >> ./run_${CLUSTER}_1node.sh
echo "make -j 8 || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
echo " " >> ./run_${CLUSTER}_1node.sh
echo "#Running make install" >> ./run_${CLUSTER}_1node.sh
echo "make install || { exit 1; }" >> ./run_${CLUSTER}_1node.sh
......
......@@ -398,6 +398,9 @@ if test x"${enable_openmp}" = x"yes"; then
FCFLAGS="$OPENMP_FCFLAGS $FCFLAGS"
fi
#AC_LANG_POP([Fortran])
want_mpi_launcher="no"
AC_MSG_CHECKING(whether mpi-launcher should be detected)
AC_ARG_ENABLE(detect-mpi-launcher,
......@@ -596,6 +599,8 @@ if test x"$can_compile_with_mkl" = x"yes" ; then
AC_MSG_RESULT([${have_mkl}])
fi
#AC_LANG_POP([Fortran])
dnl if not mkl, check all the necessary individually
if test x"${have_mkl}" = x"yes" ; then
WITH_MKL=1
......@@ -657,6 +662,7 @@ else
AC_MSG_ERROR([could not link with scalapack: specify path])
fi
fi
AC_LANG_PUSH([Fortran])
dnl check whether we can link alltogehter
AC_MSG_CHECKING([whether we can link a Fortran program with all blacs/scalapack])
......@@ -704,7 +710,7 @@ AC_MSG_RESULT([${fortran_can_check_environment}])
if test x"${fortran_can_check_environment}" = x"yes" ; then
AC_DEFINE([HAVE_ENVIRONMENT_CHECKING],[1],[Fortran can query environment variables])
fi
AC_LANG_POP([Fortran])
dnl check whether BAND_TO_FULL_BLOCKING is set
AC_MSG_CHECKING(whether BAND_TO_FULL_BLOCKING is requested)
......@@ -754,7 +760,7 @@ if test x"${user_sets_nvidia_gpu_compute_capability}" = x"yes" ; then
fi
fi
AC_LANG_PUSH([Fortran])
dnl Test possibility of 'use mpi', if requested
if test x"${with_mpi}" = x"yes" ; then
AC_ARG_ENABLE([mpi-module],
......@@ -788,6 +794,119 @@ if test x"${with_mpi}" = x"yes" ; then
fi
fi
fi
if test x"$with_mpi" = x"yes" && test x"$enable_openmp" = x"yes"; then
AC_MSG_CHECKING(whether the threading support of the MPI library should be checked during RUNTIME)
AC_ARG_ENABLE([runtime-threading-support-checks],
AS_HELP_STRING([--disable-runtime-threading-support-checks],
[do not check at runtime the required threading support of the MPI library. DISABLE ONLY AT YOUR OWN RISK! (default: on)]),
[
if test x"$enableval" = x"yes"; then
enable_runtime_threading_support_checks=yes
else
enable_runtime_threading_support_checks=no
fi
],
[enable_runtime_threading_support_checks=yes])
AC_MSG_RESULT([${enable_runtime_threading_support_checks}])
if test x"${enable_runtime_threading_support_checks}" = x"yes" ; then
AC_DEFINE([THREADING_SUPPORT_CHECK],[1],[can check at runtime the threading support level of MPI])
fi
AC_MSG_CHECKING(whether ELPA is allowed to limit the number of OpenMP threads at runtime)
AC_ARG_ENABLE([allow-thread-limiting],
AS_HELP_STRING([--enable-allow-thread-limiting],
[do a runtime check whether threading support of the MPI library is sufficient. If not ELPA will limit the number of OpenMP threads to 1 during the run]),
[
if test x"$enableval" = x"yes"; then
enable_allow_thread_limiting=yes
else
enable_allow_thread_limiting=no
fi
],
[enable_allow_thread_limiting=yes])
AC_MSG_RESULT([${enable_allow_thread_limiting}])
if test x"${enable_allow_thread_limiting}" = x"yes" ; then
AC_DEFINE([ALLOW_THREAD_LIMITING],[1],[ELPA can at runtime limit the number of OpenMP threads to 1 if needed])
fi
#consistency check
if test x"${enable_allow_thread_limiting}" = x"yes" && test x"${enable_runtime_threading_support_checks}" = x"no"; then
AC_MSG_ERROR([You cannot set --enable-allow-thread-limiting and --disable-runtime-threading-support-checks at the same time. Thread limiting needs runtime support checks!])
fi
if test x"${enable_allow_thread_limiting}" = x"no" && test x"${enable_runtime_threading_support_checks}" = x"yes"; then
AC_MSG_NOTICE([You set --disable-allow-thread-limiting and --enable-runtime-threading-support-checks. If ELPA detects during a run that])
AC_MSG_NOTICE([your MPI library does not provide a sufficient level of threading support, ELPA will only _print_ a warning and continue])
AC_MSG_NOTICE([This might lead to undefined behavior, including wrong results])
fi
AC_ARG_WITH([threading-support-check-during-build],[AS_HELP_STRING([--with-threading-support-check-during-build],[Do checks at build time whether the MPI threading level support is sufficient. (default: on)])],
[
if test x"$withval" = x"yes"; then
with_threading_support_check_during_build=yes
else
with_threading_support_check_during_build=no
fi
],
[with_threading_support_check_during_build=yes])
fi
if test x"${enable_openmp}" = x"yes" && test x"${with_mpi}" = x"yes" && test x"${with_threading_support_check_during_build}" = x"yes"; then
mpi_threading_level_sufficient=no
AC_MSG_NOTICE([**************************************************************************************************************************])
AC_MSG_NOTICE([* Please notice if the following step hangs or aborts abnormally then you cannot run a short MPI-program during configure *])
AC_MSG_NOTICE([* In this case please re-run configure with '--without-threading-support-check-during-build' _AND_ follow the hints in *])
AC_MSG_NOTICE([* the INSTALL and USER_GUIDE documents! *])
AC_MSG_NOTICE([* In case you get some other warnings about threading support follow one of the steps detailed there                      *])
AC_MSG_NOTICE([**************************************************************************************************************************])
AC_MSG_CHECKING(what threading level is supported by the MPI library)
AC_RUN_IFELSE([AC_LANG_SOURCE([
program testit
use iso_c_binding
implicit none
include "mpif.h"
integer(kind=c_int) :: provided, error, status
status = 1
call mpi_init_thread(MPI_THREAD_MULTIPLE, provided,error)
!call mpi_init(error)
!call mpi_query_thread(provided, error)
if (provided .eq. MPI_THREAD_SERIALIZED .or. provided .eq. MPI_THREAD_MULTIPLE) then
status = 0
endif
call mpi_finalize(error)
call exit(status)
end
])],
[mpi_threading_level_sufficient=yes],
[mpi_threading_level_sufficient=no],
[mpi_threading_level_sufficient=yes]
)
AC_MSG_RESULT([${mpi_threading_level_sufficient}])
if test x"${mpi_threading_level_sufficient}" = x"yes" ; then
AC_DEFINE([HAVE_SUFFICIENT_MPI_THREADING_SUPPORT],[1],[MPI threading support is sufficient])
else
AC_MSG_WARN([Your MPI implementation does not provide a sufficient threading level for OpenMP])
AC_MSG_WARN([You do have several options:])
AC_MSG_WARN([ * disable OpenMP (--disable-openmp): this will ensure correct results, but maybe some performance drop])
AC_MSG_WARN([ * use an MPI-library with the required threading support level (see the INSTALL and USER_GUIDE): this will ])
AC_MSG_WARN([ ensure correct results and best performance])
AC_MSG_WARN([ * allow ELPA at runtime to change the number of threads to 1 by setting "--enable-runtime-threading-support-checks])
AC_MSG_WARN([ --enable-allow-thread-limiting --without-threading-support-check-during-build": this will ensure correct results, but ])
AC_MSG_WARN([ maybe not the best performance (depends on the threading of your blas/lapack libraries), see the USER_GUIDE])
AC_MSG_WARN([ * switch off the checking of threading support "--disable-runtime-threading-support-checks ])
AC_MSG_WARN([ --without-threading-support-check-during-build: DO THIS AT YOUR OWN RISK! This will be fast, but might])
AC_MSG_WARN([ (depending on your MPI library sometimes) lead to wrong results])
AC_MSG_ERROR([You do have to take an action of the choices above!])
fi
fi
AC_LANG_POP([Fortran])
dnl Assemble the list of kernels to build
......@@ -2329,6 +2448,13 @@ else
make -f $srcdir/generated_headers.am generated-headers top_srcdir="$srcdir" CPP="$CPP"
fi
if test x"$enable_runtime_threading_support_checks" = x"no" && test x"$with_mpi" = x"yes" && test x"${enable_openmp}" = x"yes"; then
echo " "
echo " You disabled the checking whether your MPI library offers a sufficient level of threading support!"
echo " You 'convince' ELPA that everything is ok, do not complain about problems with ELPA in this build!"
echo " "
fi
if test x"$old_elpa_version" = x"yes"; then
echo " "
echo " It is possible that your current version of ELPA is not the latest one."
......
This diff is collapsed.
......@@ -225,14 +225,18 @@ function elpa_solve_evp_&
omp_threads_caller = omp_get_max_threads()
! check the number of threads that ELPA should use internally
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
call obj%get("limit_openmp_threads",limitThreads,error)
if (limitThreads .eq. 0) then
#endif
call obj%get("omp_threads",nrThreads,error)
call omp_set_num_threads(nrThreads)
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
else
nrThreads = 1
call omp_set_num_threads(nrThreads)
endif
#endif
#else
nrThreads = 1
#endif
......
......@@ -88,14 +88,18 @@
omp_threads_caller = omp_get_max_threads()
! check the number of threads that ELPA should use internally
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
call obj%get("limit_openmp_threads",limitThreads,error)
if (limitThreads .eq. 0) then
#endif
call obj%get("omp_threads",nrThreads,error)
call omp_set_num_threads(nrThreads)
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
else
nrThreads = 1
call omp_set_num_threads(nrThreads)
endif
#endif
#else
nrThreads=1
......
......@@ -99,14 +99,18 @@
omp_threads_caller = omp_get_max_threads()
! check the number of threads that ELPA should use internally
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
call obj%get("limit_openmp_threads",limitThreads,error)
if (limitThreads .eq. 0) then
#endif
call obj%get("omp_threads",nrThreads,error)
call omp_set_num_threads(nrThreads)
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
else
nrThreads = 1
call omp_set_num_threads(nrThreads)
endif
#endif
#else
nrThreads=1
#endif
......
......@@ -263,14 +263,18 @@
omp_threads_caller = omp_get_max_threads()
! check the number of threads that ELPA should use internally
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
call obj%get("limit_openmp_threads",limitThreads,error)
if (limitThreads .eq. 0) then
#endif
call obj%get("omp_threads",nrThreads,error)
call omp_set_num_threads(nrThreads)
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
else
nrThreads = 1
call omp_set_num_threads(nrThreads)
endif
#endif
#else
nrThreads = 1
......
......@@ -634,15 +634,22 @@ module elpa_impl
endif
endif
#ifdef WITH_OPENMP_TRADITIONAL
#if defined(WITH_OPENMP_TRADITIONAL) && defined(THREADING_SUPPORT_CHECK) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
! check the threading level supported by the MPI library
call mpi_query_thread(providedMPI, mpierr)
if ((providedMPI .ne. MPI_THREAD_SERIALIZED) .and. (providedMPI .ne. MPI_THREAD_MULTIPLE)) then
#if defined(ALLOW_THREAD_LIMITING)
write(error_unit,*) "WARNING elpa_setup: MPI threading level MPI_THREAD_SERIALIZED or MPI_THREAD_MULTIPLE required but &
&your implementation does not support this. The number of OpenMP threads within ELPA will be &
&your implementation does not support this! The number of OpenMP threads within ELPA will be &
&limited to 1"
call self%set("limit_openmp_threads", 1, error)
if (check_elpa_set(error, ELPA_ERROR_SETUP)) return
#else
write(error_unit,*) "WARNING elpa_setup: MPI threading level MPI_THREAD_SERIALIZED or MPI_THREAD_MULTIPLE required but &
&your implementation does not support this! Since you did not build ELPA with &
&--enable-allow-thread-limiting, this is a severe warning. ELPA will _not_ try to cure this problem and &
&the results might be wrong. USE AT YOUR OWN RISK !"
#endif
endif
#endif
......
......@@ -286,7 +286,9 @@ static const elpa_index_int_entry_t int_entries[] = {
#endif
INT_ENTRY("cannon_buffer_size", "Increasing the buffer size might make it faster, but costs memory", 0, ELPA_AUTOTUNE_NOT_TUNABLE, ELPA_AUTOTUNE_DOMAIN_ANY, \
cannon_buffer_size_cardinality, cannon_buffer_size_enumerate, cannon_buffer_size_is_valid, NULL, PRINT_YES),
#if defined(THREADING_SUPPORT_CHECK) && defined(ALLOW_THREAD_LIMITING) && !defined(HAVE_SUFFICIENT_MPI_THREADING_SUPPORT)
BOOL_ENTRY("limit_openmp_threads", "Limit the number if openmp threads to 1", 0, ELPA_AUTOTUNE_NOT_TUNABLE, 0, PRINT_NO),
#endif
BOOL_ENTRY("qr", "Use QR decomposition, only used for ELPA_SOLVER_2STAGE, real case", 0, ELPA_AUTOTUNE_NOT_TUNABLE, ELPA_AUTOTUNE_DOMAIN_REAL, PRINT_YES),
BOOL_ENTRY("timings", "Enable time measurement", 0, ELPA_AUTOTUNE_NOT_TUNABLE, 0, PRINT_YES),
BOOL_ENTRY("debug", "Emit verbose debugging messages", 0, ELPA_AUTOTUNE_NOT_TUNABLE, 0, PRINT_YES),
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment