elpa / elpa · Commits

Commit cebd342e
authored Jan 18, 2022 by Andreas Marek

    Gitlab CI: special flags if testing with oneapi compilers

parent b3afa520
Changes: 3 · Pipelines: 1

.gitlab-ci.yml
(diff collapsed)
ci_test_scripts/generate_gitlab_ci_tests.py
@@ -26,24 +26,28 @@ def set_compiler_wrappers(mpi, fc, cc, instr, fortran_compiler, c_compiler):
     fortran_compiler_wrapper = "undefined"
     c_compiler_wrapper = "undefined"
     if (instr != "power8"):
-        if (m == "mpi" and fc == "intel2"):
+        if (m == "mpi" and fc == "intel"):
             fortran_compiler_wrapper = "mpiifort"
+        if (m == "mpi" and fc == "oneapi"):
+            fortran_compiler_wrapper = "mpiifort -fc=ifx"
+        #if (m == "mpi" and fc == "intel2"):
+        #    fortran_compiler_wrapper="mpiifort"
         if (m == "mpi" and fc == "gnu"):
             fortran_compiler_wrapper = "mpif90"
-        if (m == "mpi" and cc == "intel2"):
+        if (m == "mpi" and cc == "intel"):
             c_compiler_wrapper = "mpiicc -cc=icx"
+        if (m == "mpi" and cc == "oneapi"):
+            c_compiler_wrapper = "mpiicc -cc=icx"
+        #if (m == "mpi" and cc == "intel2"):
+        #    c_compiler_wrapper="mpiicc"
         if (m == "mpi" and cc == "gnu"):
             c_compiler_wrapper = "mpicc"
-        if (m == "nompi" and fc == "intel2"):
+        if (m == "nompi" and fc == "intel"):
             fortran_compiler_wrapper = fortran_compiler[fc]
+        if (m == "nompi" and fc == "oneapi"):
+            fortran_compiler_wrapper = fortran_compiler[fc]
         if (m == "nompi" and fc == "gnu"):
             fortran_compiler_wrapper = fortran_compiler[fc]
-        if (m == "nompi" and cc == "intel2"):
+        if (m == "nompi" and cc == "intel"):
             c_compiler_wrapper = c_compiler[cc]
+        if (m == "nompi" and cc == "oneapi"):
+            c_compiler_wrapper = c_compiler[cc]
         if (m == "nompi" and cc == "gnu"):
             c_compiler_wrapper = c_compiler[cc]
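To make the effect of the new "oneapi" branches concrete, here is a minimal, self-contained sketch of the wrapper selection for the non-power8 case. The helper name pick_wrappers is invented for illustration only; the wrapper strings and the compiler dictionaries are the ones used in this script (see the dictionaries further down in this diff).

```python
# Minimal sketch of the compiler-wrapper choice above for non-power8 jobs.
# pick_wrappers() is a hypothetical helper; the real script does this
# inside set_compiler_wrappers().
def pick_wrappers(m, fc, cc):
    fortran_compiler = {"gnu": "gfortran", "intel": "ifort", "oneapi": "ifx"}
    c_compiler       = {"gnu": "gcc",      "intel": "icc",   "oneapi": "icx"}
    if m == "mpi":
        # MPI builds go through the MPI wrappers; the oneapi entries force
        # the LLVM-based backends via -fc=ifx / -cc=icx.
        fortran_wrapper = {"intel":  "mpiifort",
                           "oneapi": "mpiifort -fc=ifx",
                           "gnu":    "mpif90"}[fc]
        c_wrapper       = {"intel":  "mpiicc -cc=icx",
                           "oneapi": "mpiicc -cc=icx",
                           "gnu":    "mpicc"}[cc]
    else:  # "nompi": call the serial compilers directly
        fortran_wrapper = fortran_compiler[fc]
        c_wrapper       = c_compiler[cc]
    return fortran_wrapper, c_wrapper

print(pick_wrappers("mpi", "oneapi", "oneapi"))
# ('mpiifort -fc=ifx', 'mpiicc -cc=icx')
```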
@@ -66,7 +70,7 @@ def set_scalapack_flags(instr, fc, g, m, o):
     ldflags = "undefined"
     if (instr != "power8"):
-        if (fc == "intel2"):
+        if (fc == "intel" or fc == "oneapi"):
             if (m == "mpi"):
                 if (o == "openmp"):
                     scalapackldflags = "$MKL_INTEL_SCALAPACK_LDFLAGS_MPI_OMP "
@@ -568,14 +572,18 @@ print("#The tests follow here")
 c_compiler = {
     "gnu"    : "gcc",
-    "intel2" : "icx",
+    "intel"  : "icc",
+    "oneapi" : "icx",
 }
 #    "oneapi" : "icx",
 #    "intel"  : "icc",
 fortran_compiler = {
     "gnu"    : "gfortran",
-    "intel2" : "ifx",
+    "intel"  : "ifort",
+    "oneapi" : "ifx",
 }
 #    "oneapi" : "ifx",
 #    "intel"  : "ifort",
 #    "pgi"    : "pgfortran",
@@ -675,15 +683,19 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
     # - if pgfortran => use always GPUs
     # - if gfortran disable MPI module
     # - on KNL only use intel, do not test openmp
+    if (fc == "oneapi" and cc != "oneapi"):
+        continue
+    if (cc == "oneapi" and fc != "oneapi"):
+        continue
     if (instr == "power8" and (fc != "pgi" and fc != "gnu")):
         continue
-    if (instr == "knl" and (fc != "intel2" and cc != "intel2")):
+    if (instr == "knl" and (fc != "intel" and cc != "intel")):
         continue
     if (instr == "knl" and o == "openmp"):
         continue
     if (fc == "pgi" and instr != "power8"):
         continue
-    if (cc == "intel2" and fc == "gnu"):
+    if ((cc == "intel" or cc == "oneapi") and fc == "gnu"):
         continue
     if (fc == "pgi" and g != "with-gpu"):
         continue
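As a quick stand-alone illustration of what the two new guards do to the test matrix: oneapi is only exercised when both the Fortran and the C compiler are oneapi, and (like the Intel C compilers) it is never paired with gfortran. The snippet below only replays the relevant guards over a reduced set of combinations; it is not the full generator loop.

```python
# Simplified replay of the combination filter above (illustration only).
from itertools import product

kept = []
for fc, cc in product(["gnu", "intel", "oneapi"], repeat=2):
    if fc == "oneapi" and cc != "oneapi":
        continue
    if cc == "oneapi" and fc != "oneapi":
        continue
    if (cc == "intel" or cc == "oneapi") and fc == "gnu":
        continue
    kept.append((fc, cc))

print(kept)
# [('gnu', 'gnu'), ('intel', 'gnu'), ('intel', 'intel'), ('oneapi', 'oneapi')]
```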
@@ -697,6 +709,13 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
     if (instr == "power8" and (m == "nompi" or g == "no-gpu")):
         continue
+    if (fc == "oneapi"):
+        COMPILER_CONFIGURE_EXTRAS = "--disable-c-tests --disable-mpi-module"
+    #elif (fc == "mpiifort -fc=ifx"):
+    #    COMPILER_CONFIGURE_EXTRAS = "--disable-c-tests --disable-mpi-module"
+    else:
+        COMPILER_CONFIGURE_EXTRAS = " "
     # set C and FCFLAGS according to instruction set
     (CFLAGS, FCFLAGS, INSTRUCTION_OPTIONS) = set_cflags_fcflags(instr, cc, fc, instruction_set)
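The COMPILER_CONFIGURE_EXTRAS string set here is appended to the configure line assembled in the hunks further below. A rough sketch of that assembly, with made-up placeholder values for mpi_configure_flag and the precision option (the real script builds a much longer command):

```python
# Rough sketch of how the oneapi-specific extras join the configure line.
# The default argument values below are placeholders for illustration only.
def configure_line(fc, mpi_configure_flag="--with-mpi=yes",
                   precision="--enable-single-precision"):
    extras = "--disable-c-tests --disable-mpi-module" if fc == "oneapi" else " "
    return ("./configure"
            + " --enable-option-checking=fatal"
            + " " + mpi_configure_flag
            + " " + precision
            + " " + extras).strip()

print(configure_line("oneapi"))
# ./configure --enable-option-checking=fatal --with-mpi=yes --enable-single-precision --disable-c-tests --disable-mpi-module
```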
@@ -758,9 +777,9 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
         continue
     #no gpu testing with intel C compiler (gcc needed)
-    if (g == "with-gpu" and cc == "intel2"):
+    if (g == "with-gpu" and (cc == "intel" or cc == "oneapi")):
         continue
-    if (g == "with-sm80-gpu" and cc == "intel2"):
+    if (g == "with-sm80-gpu" and (cc == "intel" or cc == "oneapi")):
         continue
     #at the moment gpu testing only on AVX machines or minskys
@@ -854,7 +873,7 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
         + libs + " " + ldflags + " " + " " + scalapackldflags + " " + scalapackfcflags \
         + " --enable-option-checking=fatal" + " " + mpi_configure_flag + " " + openmp[o] \
         + " " + precision[p] + " " + assumed_size[a] + " " + band_to_full_blocking[b] \
-        + " " + gpu[g] + INSTRUCTION_OPTIONS + " \" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -i $INTERACTIVE_RUN -S $SLURM -g " + gpuJob)
+        + " " + gpu[g] + INSTRUCTION_OPTIONS + COMPILER_CONFIGURE_EXTRAS + " \" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -i $INTERACTIVE_RUN -S $SLURM -g " + gpuJob)
     if (instr == "avx2" or instr == "avx512" or instr == "knl" or g == "with-gpu" or g == "with-sm80-gpu"):
         print(" - export REQUESTED_MEMORY=" + memory)
@@ -865,7 +884,7 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
         + libs + " " + ldflags + " " + " " + scalapackldflags + " " + scalapackfcflags \
         + " --enable-option-checking=fatal --enable-scalapack-tests --enable-autotune-redistribute-matrix" + " " + mpi_configure_flag + " " + openmp[o] \
         + " " + precision[p] + " " + assumed_size[a] + " " + band_to_full_blocking[b] \
-        + " " + gpu[g] + INSTRUCTION_OPTIONS + " \" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \" srun \" -S $SLURM -g " + gpuJob)
+        + " " + gpu[g] + INSTRUCTION_OPTIONS + COMPILER_CONFIGURE_EXTRAS + " \" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \" srun \" -S $SLURM -g " + gpuJob)
     else:
@@ -873,7 +892,7 @@ for cc, fc, m, o, p, a, b, g, instr, addr, na in product(
         + libs + " " + ldflags + " " + " " + scalapackldflags + " " + scalapackfcflags \
         + " --enable-option-checking=fatal" + " " + mpi_configure_flag + " " + openmp[o] \
         + " " + precision[p] + " " + assumed_size[a] + " " + band_to_full_blocking[b] \
-        + " " + gpu[g] + INSTRUCTION_OPTIONS + " \" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \" srun \" -i $INTERACTIVE_RUN -S $SLURM -g " + gpuJob)
+        + " " + gpu[g] + INSTRUCTION_OPTIONS + COMPILER_CONFIGURE_EXTRAS + " \" -j 8 -t $MPI_TASKS -m $MATRIX_SIZE -n $NUMBER_OF_EIGENVECTORS -b $BLOCK_SIZE -s $SKIP_STEP -q \" srun \" -i $INTERACTIVE_RUN -S $SLURM -g " + gpuJob)
     # do the test
src/elpa2/qr/elpa_pdlarfb_template.F90
@@ -107,11 +107,11 @@ subroutine qr_pdlarfb_1dcomm_&
   ! Z' = Y' * A
   if (localsize .gt. 0) then
#ifdef DOUBLE_PRECISION_REAL
-    call dgemm("Trans", "Notrans", int(k,kind=BLAS_KIND), int(n,kind=BLAS_KIND), localsize, &
+    call dgemm("Trans", "Notrans", int(k,kind=BLAS_KIND), int(n,kind=BLAS_KIND), int(localsize,kind=BLAS_KIND), &
               1.0_rk8, v(baseoffset,1), int(ldv,kind=BLAS_KIND), &
               a(offset,1), int(lda,kind=BLAS_KIND), 0.0_rk8, work(1,1), int(k,kind=BLAS_KIND))
#else
-    call sgemm("Trans", "Notrans", int(k,kind=BLAS_KIND), int(n,kind=BLAS_KIND), localsize, &
+    call sgemm("Trans", "Notrans", int(k,kind=BLAS_KIND), int(n,kind=BLAS_KIND), int(localsize,kind=BLAS_KIND), &
               1.0_rk4, v(baseoffset,1), int(ldv,kind=BLAS_KIND), &
               a(offset,1), int(lda,kind=BLAS_KIND), 0.0_rk4, work(1,1), int(k,kind=BLAS_KIND))
#endif
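For reference, with the standard BLAS GEMM semantics the call above evaluates the product noted in the "Z' = Y' * A" comment; the fix only changes how localsize is passed (converted to an integer of kind BLAS_KIND, like the other dimension arguments), not the computation itself:

$$
\mathrm{work}(1{:}k,\,1{:}n) \;=\; v(\text{baseoffset}{:}\text{baseoffset}{+}\ell{-}1,\;1{:}k)^{T}\; a(\text{offset}{:}\text{offset}{+}\ell{-}1,\;1{:}n),
\qquad \ell = \text{localsize}.
$$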
@@ -200,7 +200,7 @@ subroutine qr_pdlarft_pdlarfb_1dcomm_&
   ! calculate matrix matrix product of householder vectors and target matrix
   ! Z' = Y' * A
-  call dgemm("Trans", "Notrans", int(k,kind=BLAS_KIND), int(n,kind=BLAS_KIND), localsize, &
+  call dgemm("Trans", "Notrans", int(k,kind=BLAS_KIND), int(n,kind=BLAS_KIND), int(localsize,kind=BLAS_KIND), &
             1.0_rk8, v(baseoffset,1), int(ldv,kind=BLAS_KIND), a(offset,1), &
             int(lda,kind=BLAS_KIND), 0.0_rk8, work(1,k+1), int(k,kind=BLAS_KIND))