Commit b17f11c9 authored by Andreas Marek's avatar Andreas Marek
Browse files

Merge branch 'master_pre_stage' into kernel-power8

parents fa3e9892 3ef53b79
if [ "$(hostname)" == "buildtest-rzg" ]; then module load impi/5.1.3 intel/16.0 gcc/6.3 mkl/11.3 autotools pkg-config; fi
if [ "$(hostname)" == "knl1" -o "$(hostname)" == "knl2" -o "$(hostname)" == "knl3" -o "$(hostname)" == "knl4" ] ; then module load impi/5.1.3 intel/16.0 gcc/7.2 mkl/11.3 pkg-config; fi
if [ "$(hostname)" == "maik" ]; then module load impi/5.1.3 intel intel/17.0 gcc/7.2 mkl/2017 pkg-config/0.29.1; fi
if [ "$(hostname)" == "knl1" -o "$(hostname)" == "knl2" -o "$(hostname)" == "knl3" -o "$(hostname)" == "knl4" ] ; then module load impi/2017.4 intel/17.0 gcc/7.2 mkl/2017 pkg-config; fi
if [ "$(hostname)" == "maik" ]; then module load impi/5.1.3 intel intel/17.0 gcc/6.3 mkl/2017 pkg-config/0.29.1; fi
if [ "$(hostname)" == "gp02" ] ; then module load impi/5.1.3 intel/17.0 gcc/6.3 mkl/2017 pkg-config; fi
......@@ -13,7 +13,7 @@ if [ "$(hostname)" == "amarek-elpa-gitlab-runner-4" ]; then module load intel/16
if [ "$(hostname)" == "dvl01" ]; then module load intel/17.0 gcc/5.4 mkl/2017 impi/2017.2 gcc/5.4 cuda/8.0; fi
if [ "$(hostname)" == "dvl02" ]; then module load intel/17.0 gcc/5.4 mkl/2017 impi/2017.2 gcc/5.4 cuda/8.0; fi
if [ "$(hostname)" == "miy01" ]; then module load gcc/5.4 pgi/17.9 ompi/pgi/17.9/1.10.2 essl/5.5 cuda && export LD_LIBRARY_PATH=/opt/ibm/spectrum_mpi/lib:/opt/ibm/spectrum_mpi/profilesupport/lib:$LD_LIBRARY_PATH && export PATH=/opt/ibm/spectrum_mpi/bin:$PATH; fi
if [ "$(hostname)" == "miy01" ]; then module purge && module load gcc/5.4 smpi essl/5.5 cuda pgi/17.9 && export LD_LIBRARY_PATH=/opt/ibm/spectrum_mpi/lib:/opt/ibm/spectrum_mpi/profilesupport/lib:$LD_LIBRARY_PATH && export PATH=/opt/ibm/spectrum_mpi/bin:$PATH && export OMPI_CC=gcc && export OMPI_FC=gfortran; fi
if [ "$(hostname)" == "miy02" ]; then module load gcc/5.4 pgi/17.9 ompi/pgi/17.9/1.10.2 essl/5.5 cuda && export LD_LIBRARY_PATH=/opt/ibm/spectrum_mpi/lib:/opt/ibm/spectrum_mpi/profilesupport/lib:$LD_LIBRARY_PATH && export PATH=/opt/ibm/spectrum_mpi/bin:$PATH; fi
if [ "$(hostname)" == "miy03" ]; then module load gcc/5.4 pgi/17.9 ompi/pgi/17.9/1.10.2 essl/5.5 cuda && export LD_LIBRARY_PATH=/opt/ibm/spectrum_mpi/lib:/opt/ibm/spectrum_mpi/profilesupport/lib:$LD_LIBRARY_PATH && export PATH=/opt/ibm/spectrum_mpi/bin:$PATH; fi
......
This diff is collapsed.
......@@ -14,13 +14,13 @@ def set_number_of_cores(mpi_tasks, o):
def set_requested_memory(na):
    """Return the memory request string for a given matrix size.

    Parameters
    ----------
    na : str or int
        Matrix size; callers in this script pass it as a string.

    Returns
    -------
    str
        Memory request, e.g. "2Gb" (post-merge values: 2/6/12/16 Gb).
    """
    # Compare numerically. The original compared the *strings*
    # lexicographically, so e.g. "20000" < "5000" was true and very
    # large sizes were granted too little memory.
    n = int(na)
    if n == 150:
        memory = "2Gb"
    elif 150 < n <= 1500:
        memory = "6Gb"
    elif 1500 < n < 5000:
        memory = "12Gb"
    else:
        # NOTE(review): sizes below 150 also land here (16Gb), exactly as
        # in the original fall-through -- confirm that is intentional.
        memory = "16Gb"
    return memory
def set_compiler_wrappers(mpi, fc, cc, instr, fortran_compiler, c_compiler):
......@@ -368,10 +368,11 @@ coverage = {
#disable avx2 at the moment
#disable avx512
#"knl" : "--enable-avx512",
# Maps an instruction-set key to the configure flags that enable it in the
# generated test jobs. Keys are consumed via sorted(instruction_set.keys()),
# so the dict's insertion order is irrelevant.
# NOTE(review): the commented-out copy just above says "disable avx512", yet
# the "knl" entry here passes --enable-avx512 (and, unlike the other values,
# has no leading space) -- confirm this re-enablement is intentional.
instruction_set = {
"sse" : " --enable-sse --enable-sse-assembly",
"avx" : " --enable-avx",
"knl" : "--enable-avx512",
"avx2" : " --enable-avx2",
"power8" : " --disable-sse --disable-sse-assembly --disable-avx --disable-avx2 --disable-mpi-module --with-GPU-compute-capability=sm_60 ",
}
......@@ -393,7 +394,9 @@ matrix_size = {
MPI_TASKS=2
for cc, fc, m, o, p, a, b, g, cov, instr, addr, na in product(
# sorted(gpu.keys()),
#for cc, fc, m, o, p, a, b, g, cov, instr, addr, na in product(
for cc, fc, m, o, p, a, b, cov, instr, addr, na in product(
sorted(c_compiler.keys()),
sorted(fortran_compiler.keys()),
sorted(mpi.keys()),
......@@ -401,7 +404,6 @@ for cc, fc, m, o, p, a, b, g, cov, instr, addr, na in product(
sorted(precision.keys()),
sorted(assumed_size.keys()),
sorted(band_to_full_blocking.keys()),
sorted(gpu.keys()),
sorted(coverage.keys()),
sorted(instruction_set.keys()),
sorted(address_sanitize_flag.keys()),
......@@ -410,6 +412,7 @@ for cc, fc, m, o, p, a, b, g, cov, instr, addr, na in product(
nev = 150
nblk = 16
g = "no-gpu"
# do not run all combinations with all compilers
# especially - use pgi only on minskys for now
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.