Commit 9a1e9b62 authored by Andreas Marek

Start to add redistribute

parent 919e9ba0
......@@ -104,30 +104,6 @@ if test x"${with_mpi}" = x"yes"; then
AC_DEFINE([WITH_MPI], [1], [use MPI])
fi
dnl Scalapack tests
AC_MSG_CHECKING(whether --enable-scalapack-tests is specified)
AC_ARG_ENABLE([scalapack-tests],
AS_HELP_STRING([--enable-scalapack-tests],
[build SCALAPACK test cases for performance comparison, needs MPI, default no.]),
[
if test x"$enableval" = x"yes"; then
enable_scalapack_tests=yes
else
enable_scalapack_tests=no
fi
],
[enable_scalapack_tests="no"])
AC_MSG_RESULT([$enable_scalapack_tests])
if test x"${enable_scalapack_tests}" = x"yes"; then
if test x"$with_mpi" = x"no"; then
AC_MSG_ERROR([You cannot build the SCALAPACK test cases without MPI])
fi
AC_DEFINE([WITH_SCALAPACK_TESTS], [1], [build SCALAPACK test cases])
fi
AM_CONDITIONAL([WITH_SCALAPACK_TESTS], [test x"$enable_scalapack_tests" = x"yes"])
dnl C
AC_LANG_PUSH([C])
......@@ -1411,6 +1387,100 @@ if test x"${enable_autotuning}" = x"yes"; then
AC_DEFINE([ENABLE_AUTOTUNING], [1], [enable autotuning functionality])
fi
dnl Scalapack tests
AC_MSG_CHECKING(whether --enable-scalapack-tests is specified)
AC_ARG_ENABLE([scalapack-tests],
AS_HELP_STRING([--enable-scalapack-tests],
[build SCALAPACK test cases for performance comparison, needs MPI, default no.]),
[
if test x"$enableval" = x"yes"; then
enable_scalapack_tests=yes
else
enable_scalapack_tests=no
fi
],
[enable_scalapack_tests="no"])
AC_MSG_RESULT([$enable_scalapack_tests])
if test x"${enable_scalapack_tests}" = x"yes"; then
if test x"$with_mpi" = x"no"; then
AC_MSG_ERROR([You cannot build the SCALAPACK test cases without MPI])
fi
AC_DEFINE([WITH_SCALAPACK_TESTS], [1], [build SCALAPACK test cases])
fi
AM_CONDITIONAL([WITH_SCALAPACK_TESTS], [test x"$enable_scalapack_tests" = x"yes"])
AC_MSG_CHECKING(whether matrix redistribution should be considered in autotuning)
AC_ARG_ENABLE([autotune-redistribute-matrix],
AS_HELP_STRING([--enable-autotune-redistribute-matrix],
[Allow ELPA, during autotuning, to redistribute the matrix in order to find the best (ELPA-internal) block size for the block-cyclic distribution (needs ScaLAPACK functionality)]),
[if test x"$enableval" = x"yes"; then
enable_autotune_redistribute_matrix=yes
else
enable_autotune_redistribute_matrix=no
fi],
[enable_autotune_redistribute_matrix=no])
AC_MSG_RESULT([${enable_autotune_redistribute_matrix}])
if test x"${enable_autotune_redistribute_matrix}" = x"yes" ; then
if test x"${enable_scalapack_tests}" = x"no"; then
AC_MSG_ERROR([Please also set --enable-scalapack-tests in this case])
fi
if test x"${with_mpi}" = x"no"; then
AC_MSG_ERROR([For this option ELPA must be built with MPI enabled])
fi
AC_DEFINE([REDISTRIBUTE_MATRIX],[1],[enable matrix re-distribution during autotuning])
fi
AC_MSG_CHECKING(whether C tests should be provided)
AC_ARG_ENABLE([c-tests],
AS_HELP_STRING([--enable-c-tests],
......
......@@ -9,6 +9,16 @@
name = value,
#define ELPA_ENUM_SUM(name, value, ...) +1
/* MATRIX layout */
#define ELPA_FOR_ALL_MATRIX_LAYOUTS(X) \
X(COLUMN_MAJOR_ORDER, 1) \
X(ROW_MAJOR_ORDER, 2)
enum MATRIX_LAYOUTS {
ELPA_FOR_ALL_MATRIX_LAYOUTS(ELPA_ENUM_ENTRY)
};
#define ELPA_NUMBER_OF_MATRIX_LAYOUTS (0 ELPA_FOR_ALL_MATRIX_LAYOUTS(ELPA_ENUM_SUM))
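For context, the X-macro pattern used here expands the layout list twice: once with ELPA_ENUM_ENTRY to generate the enumerators, once with ELPA_ENUM_SUM to count them. A minimal self-contained sketch of the same pattern (shortened names; illustrative, not part of the commit):

#include <stdio.h>

#define ENUM_ENTRY(name, value) name = value,
#define ENUM_SUM(name, value)   +1

#define FOR_ALL_MATRIX_LAYOUTS(X) \
        X(COLUMN_MAJOR_ORDER, 1) \
        X(ROW_MAJOR_ORDER, 2)

/* expands to: COLUMN_MAJOR_ORDER = 1, ROW_MAJOR_ORDER = 2, */
enum MATRIX_LAYOUTS {
        FOR_ALL_MATRIX_LAYOUTS(ENUM_ENTRY)
};

/* expands to: (0 +1 +1), i.e. 2 */
#define NUMBER_OF_MATRIX_LAYOUTS (0 FOR_ALL_MATRIX_LAYOUTS(ENUM_SUM))

int main(void) {
        printf("%d layouts, ROW_MAJOR_ORDER = %d\n",
               NUMBER_OF_MATRIX_LAYOUTS, ROW_MAJOR_ORDER);
        return 0;
}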
/* Solver constants */
#define ELPA_FOR_ALL_SOLVERS(X) \
......
......@@ -54,7 +54,7 @@
implicit none
#include "../general/precision_kinds.F90"
class(elpa_abstract_impl_t), intent(inout) :: obj
integer(kind=ik) :: na, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
integer(kind=ik) :: na, matrixRows, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
#ifdef USE_ASSUMED_SIZE
MATH_DATATYPE(kind=rck) :: a(obj%local_nrows,*)
#else
......@@ -94,7 +94,7 @@
#endif
na = obj%na
lda = obj%local_nrows
matrixRows = obj%local_nrows
nblk = obj%nblk
matrixCols = obj%local_ncols
......@@ -200,7 +200,7 @@
call obj%timer%start("blas")
call PRECISION_POTRF('U', int(na-n+1,kind=BLAS_KIND), a(l_row1,l_col1), &
int(lda,kind=BLAS_KIND), infoBLAS )
int(matrixRows,kind=BLAS_KIND), infoBLAS )
info = int(infoBLAS,kind=ik)
call obj%timer%stop("blas")
......@@ -233,7 +233,7 @@
call obj%timer%start("blas")
call PRECISION_POTRF('U', int(nblk,kind=BLAS_KIND), a(l_row1,l_col1), &
int(lda,kind=BLAS_KIND) , infoBLAS )
int(matrixRows,kind=BLAS_KIND) , infoBLAS )
info = int(infoBLAS,kind=ik)
call obj%timer%stop("blas")
......@@ -283,7 +283,7 @@
if (l_cols-l_colx+1>0) &
call PRECISION_TRSM('L', 'U', BLAS_TRANS_OR_CONJ, 'N', int(nblk,kind=BLAS_KIND), &
int(l_cols-l_colx+1,kind=BLAS_KIND), ONE, tmp2, &
int(ubound(tmp2,dim=1),kind=BLAS_KIND), a(l_row1,l_colx), int(lda,kind=BLAS_KIND) )
int(ubound(tmp2,dim=1),kind=BLAS_KIND), a(l_row1,l_colx), int(matrixRows,kind=BLAS_KIND) )
call obj%timer%stop("blas")
endif
......@@ -326,7 +326,7 @@
int(nblk,kind=BLAS_KIND), -ONE, &
tmatr(lrs,1), int(ubound(tmatr,dim=1),kind=BLAS_KIND), tmatc(lcs,1), &
int(ubound(tmatc,dim=1),kind=BLAS_KIND), &
ONE, a(lrs,lcs), int(lda,kind=BLAS_KIND))
ONE, a(lrs,lcs), int(matrixRows,kind=BLAS_KIND))
call obj%timer%stop("blas")
enddo
......
......@@ -62,7 +62,7 @@
implicit none
#include "../general/precision_kinds.F90"
class(elpa_abstract_impl_t), intent(inout) :: obj
integer(kind=ik) :: na, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
integer(kind=ik) :: na, matrixRows, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
#ifdef USE_ASSUMED_SIZE
MATH_DATATYPE(kind=rck) :: a(obj%local_nrows,*)
#else
......@@ -87,7 +87,7 @@
&")
na = obj%na
lda = obj%local_nrows
matrixRows = obj%local_nrows
nblk = obj%nblk
matrixCols = obj%local_ncols
......@@ -185,7 +185,7 @@
if (my_pcol==pcol(n, nblk, np_cols)) then
call obj%timer%start("blas")
call PRECISION_TRTRI('U', 'N', int(nb,kind=BLAS_KIND), a(l_row1,l_col1), int(lda,kind=BLAS_KIND), &
call PRECISION_TRTRI('U', 'N', int(nb,kind=BLAS_KIND), a(l_row1,l_col1), int(matrixRows,kind=BLAS_KIND), &
infoBLAS)
info = int(infoBLAS,kind=ik)
call obj%timer%stop("blas")
......@@ -231,7 +231,7 @@
call obj%timer%start("blas")
if (l_cols-l_colx+1>0) &
call PRECISION_TRMM('L', 'U', 'N', 'N', int(nb,kind=BLAS_KIND), int(l_cols-l_colx+1,kind=BLAS_KIND), ONE, &
tmp2, int(ubound(tmp2,dim=1),kind=BLAS_KIND), a(l_row1,l_colx), int(lda,kind=BLAS_KIND))
tmp2, int(ubound(tmp2,dim=1),kind=BLAS_KIND), a(l_row1,l_colx), int(matrixRows,kind=BLAS_KIND))
call obj%timer%stop("blas")
if (l_colx<=l_cols) tmat2(1:nb,l_colx:l_cols) = a(l_row1:l_row1+nb-1,l_colx:l_cols)
if (my_pcol==pcol(n, nblk, np_cols)) tmat2(1:nb,l_col1:l_col1+nb-1) = tmp2(1:nb,1:nb) ! tmp2 has the lower left triangle 0
......@@ -269,7 +269,7 @@
int(nb,kind=BLAS_KIND), -ONE, &
tmat1, int(ubound(tmat1,dim=1),kind=BLAS_KIND), tmat2(1,l_col1), &
int(ubound(tmat2,dim=1),kind=BLAS_KIND), ONE, &
a(1,l_col1), int(lda,kind=BLAS_KIND) )
a(1,l_col1), int(matrixRows,kind=BLAS_KIND) )
call obj%timer%stop("blas")
......
......@@ -87,7 +87,7 @@
integer(kind=ik) :: istat
character(200) :: errorMessage
logical :: success
integer(kind=ik) :: nblk, mpi_comm_rows, mpi_comm_cols, lda, ldaCols, error
integer(kind=ik) :: nblk, mpi_comm_rows, mpi_comm_cols, matrixRows, matrixCols, error
call obj%timer%start("elpa_mult_at_b_&
&MATH_DATATYPE&
......@@ -95,10 +95,10 @@
&PRECISION&
&")
na = obj%na
nblk = obj%nblk
lda = obj%local_nrows
ldaCols = obj%local_ncols
na = obj%na
nblk = obj%nblk
matrixRows = obj%local_nrows
matrixCols = obj%local_ncols
call obj%get("mpi_comm_rows",mpi_comm_rows,error)
......
......@@ -67,7 +67,7 @@
implicit none
class(elpa_abstract_impl_t), intent(inout) :: obj
integer(kind=ik) :: na, nev, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
integer(kind=ik) :: na, nev, matrixRows, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
real(kind=REAL_DATATYPE) :: d(obj%na), e(obj%na)
#ifdef USE_ASSUMED_SIZE
real(kind=REAL_DATATYPE) :: q(obj%local_nrows,*)
......@@ -89,7 +89,7 @@
na = obj%na
nev = obj%nev
nblk = obj%nblk
ldq = obj%local_nrows
matrixRows = obj%local_nrows
matrixCols = obj%local_ncols
#ifdef WITH_OPENMP
......@@ -129,7 +129,7 @@
call solve_tridi_&
&PRECISION&
&_private_impl(obj, na, nev, d, e, q, ldq, nblk, matrixCols, &
&_private_impl(obj, na, nev, d, e, q, matrixRows, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols,.false., wantDebug, success, &
nrThreads)
......
......@@ -177,7 +177,7 @@
np_rows = int(np_rowsMPI,kind=MPI_KIND)
my_pcol = int(my_pcolMPI,kind=MPI_KIND)
np_cols = int(np_colsMPI,kind=MPI_KIND)
if (wantDebug) call obj%timer%stop(",kind=MPI_KIND)mpi_communication")
if (wantDebug) call obj%timer%stop("mpi_communication")
! Get global_id mapping 2D processor coordinates to global id
......
......@@ -62,6 +62,11 @@ static int enumerate_identity(elpa_index_t index, int i);
static int cardinality_bool(elpa_index_t index);
static int valid_bool(elpa_index_t index, int n, int new_value);
static int number_of_matrix_layouts(elpa_index_t index);
static int matrix_layout_enumerate(elpa_index_t index, int i);
static int matrix_layout_is_valid(elpa_index_t index, int n, int new_value);
static const char* elpa_matrix_layout_name(int layout);
static int number_of_solvers(elpa_index_t index);
static int solver_enumerate(elpa_index_t index, int i);
static int solver_is_valid(elpa_index_t index, int n, int new_value);
......@@ -85,6 +90,10 @@ static int stripewidth_real_cardinality(elpa_index_t index);
static int stripewidth_real_enumerate(elpa_index_t index, int i);
static int stripewidth_real_is_valid(elpa_index_t index, int n, int new_value);
static int internal_nblk_cardinality(elpa_index_t index);
static int internal_nblk_enumerate(elpa_index_t index, int i);
static int internal_nblk_is_valid(elpa_index_t index, int n, int new_value);
static int stripewidth_complex_cardinality(elpa_index_t index);
static int stripewidth_complex_enumerate(elpa_index_t index, int i);
static int stripewidth_complex_is_valid(elpa_index_t index, int n, int new_value);
......@@ -189,10 +198,16 @@ static const elpa_index_int_entry_t int_entries[] = {
INT_ANY_ENTRY("mpi_comm_cols", "Communicator for inter-column communication", PRINT_NO),
INT_ANY_ENTRY("mpi_comm_parent", "Parent communicator", PRINT_NO),
INT_ANY_ENTRY("blacs_context", "BLACS context", PRINT_NO),
#ifdef REDISTRIBUTE_MATRIX
INT_ENTRY("internal_nblk", "Internally used block size of scalapack block-cyclic distribution", 0, ELPA_AUTOTUNE_FAST, ELPA_AUTOTUNE_DOMAIN_ANY, \
internal_nblk_cardinality, internal_nblk_enumerate, internal_nblk_is_valid, NULL, PRINT_YES),
#endif
#ifdef STORE_BUILD_CONFIG
INT_ENTRY("output_build_config", "Output the build config", 0, ELPA_AUTOTUNE_NOT_TUNABLE, ELPA_AUTOTUNE_DOMAIN_ANY, \
cardinality_bool, enumerate_identity, output_build_config_is_valid, NULL, PRINT_NO),
#endif
INT_ENTRY("matrix_order","Order of the matrix layout", COLUMN_MAJOR_ORDER, ELPA_AUTOTUNE_NOT_TUNABLE, ELPA_AUTOTUNE_DOMAIN_ANY, \
number_of_matrix_layouts, matrix_layout_enumerate, matrix_layout_is_valid, elpa_matrix_layout_name, PRINT_YES), \
INT_ENTRY("solver", "Solver to use", ELPA_SOLVER_1STAGE, ELPA_AUTOTUNE_FAST, ELPA_AUTOTUNE_DOMAIN_ANY, \
number_of_solvers, solver_enumerate, solver_is_valid, elpa_solver_name, PRINT_YES),
INT_ENTRY("gpu", "Use GPU acceleration", 0, ELPA_AUTOTUNE_MEDIUM, ELPA_AUTOTUNE_DOMAIN_ANY, \
......@@ -582,6 +597,48 @@ static int enumerate_identity(elpa_index_t index, int i) {
case value: \
return available && (other_checks(value));
static const char* elpa_matrix_layout_name(int layout) {
switch(layout) {
ELPA_FOR_ALL_MATRIX_LAYOUTS(NAME_CASE)
default:
return "(Invalid matrix layout)";
}
}
static int number_of_matrix_layouts(elpa_index_t index) {
return ELPA_NUMBER_OF_MATRIX_LAYOUTS;
}
static int matrix_layout_enumerate(elpa_index_t index, int i) {
#define OPTION_RANK(name, value, ...) \
+(value >= sizeof(array_of_size_value)/sizeof(int) ? 0 : 1)
#define EMPTY()
#define DEFER1(m) m EMPTY()
#define EVAL(...) __VA_ARGS__
#define ENUMERATE_CASE(name, value, ...) \
{ const int array_of_size_value[value]; \
case 0 DEFER1(INNER_ITERATOR)()(OPTION_RANK): \
return value; }
switch(i) {
#define INNER_ITERATOR() ELPA_FOR_ALL_MATRIX_LAYOUTS
EVAL(ELPA_FOR_ALL_MATRIX_LAYOUTS(ENUMERATE_CASE))
#undef INNER_ITERATOR
default:
return 0;
}
}
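The DEFER1/EVAL machinery above is dense: for each layout entry it declares array_of_size_value[value] and re-expands the whole layout list, and OPTION_RANK adds 1 to the case label for every entry whose value is smaller than the current one (the sizeof comparison). Each case label is therefore the rank of its entry, so matrix_layout_enumerate(i) returns the i-th smallest layout constant. A hand-expanded, runnable sketch of the generated switch (my expansion, an assumption about what the preprocessor emits):

#include <stdio.h>

/* Hand expansion of matrix_layout_enumerate() for the two layouts:
   the case label equals the number of entries with a strictly
   smaller value, i.e. the rank of the entry. */
static int matrix_layout_enumerate_expanded(int i) {
        switch (i) {
        case 0 + 0:  /* COLUMN_MAJOR_ORDER (1): no smaller entry */
                return 1;
        case 0 + 1:  /* ROW_MAJOR_ORDER (2): one smaller entry   */
                return 2;
        default:
                return 0;
        }
}

int main(void) {
        for (int i = 0; i < 2; i++)
                printf("enumerate(%d) = %d\n", i, matrix_layout_enumerate_expanded(i));
        return 0;
}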
static int matrix_layout_is_valid(elpa_index_t index, int n, int new_value) {
switch(new_value) {
ELPA_FOR_ALL_MATRIX_LAYOUTS(VALID_CASE)
default:
return 0;
}
}
static const char* elpa_solver_name(int solver) {
switch(solver) {
ELPA_FOR_ALL_SOLVERS(NAME_CASE)
......@@ -766,6 +823,36 @@ static int band_to_full_enumerate(elpa_index_t index, int i) {
return i+1;
}
static int internal_nblk_is_valid(elpa_index_t index, int n, int new_value) {
return (0 <= new_value);
}
static int internal_nblk_cardinality(elpa_index_t index) {
return 9;
}
static int internal_nblk_enumerate(elpa_index_t index, int i) {
switch(i) {
case 0:
return 2;
case 1:
return 4;
case 2:
return 8;
case 3:
return 16;
case 4:
return 32;
case 5:
return 64;
case 6:
return 128;
case 7:
return 256;
case 8:
return 1024;
default:
return 0;
}
}
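Together, cardinality/enumerate/is_valid give the autotuner a fixed menu of nine candidate block sizes: powers of two from 2 to 256, then 1024 (512 is not offered). A hedged sketch of how a tuning sweep could consume this table (the loop is illustrative; it is not part of this commit):

#include <stdio.h>

/* Mirrors internal_nblk_enumerate() above. */
static const int nblk_candidates[] = {2, 4, 8, 16, 32, 64, 128, 256, 1024};

int main(void) {
        int n = (int)(sizeof nblk_candidates / sizeof nblk_candidates[0]); /* == 9, the cardinality */
        for (int i = 0; i < n; i++) {
                /* a real autotuning step would set "internal_nblk" to this
                   value, redistribute the matrix, solve, and time the run */
                printf("candidate internal_nblk = %d\n", nblk_candidates[i]);
        }
        return 0;
}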
// TODO: shouldn't this be only for ELPA2?
static int band_to_full_is_valid(elpa_index_t index, int n, int new_value) {
int max_block=10;
......
......@@ -6,6 +6,8 @@
! General constants
ELPA_FOR_ALL_ERRORS(FORTRAN_CONSTANT)
! matrix layout constants
ELPA_FOR_ALL_MATRIX_LAYOUTS(FORTRAN_CONSTANT)
! Solver constants
ELPA_FOR_ALL_SOLVERS(FORTRAN_CONSTANT)
......
......@@ -52,6 +52,7 @@
#undef scal_PRECISION_GEMM
#undef scal_PRECISION_NRM2
#undef scal_PRECISION_LASET
#undef scal_PRECISION_GEMR2D
#undef PRECISION_SUFFIX
#undef ELPA_IMPL_SUFFIX
......@@ -118,6 +119,7 @@
#define scal_PRECISION_GEMM PDGEMM
#define scal_PRECISION_NRM2 PDNRM2
#define scal_PRECISION_LASET PDLASET
#define scal_PRECISION_GEMR2D PDGEMR2D
#define MPI_REAL_PRECISION MPI_REAL8
#define MPI_MATH_DATATYPE_PRECISION MPI_REAL8
#define MPI_MATH_DATATYPE_PRECISION_C MPI_DOUBLE
......@@ -185,6 +187,7 @@
#define scal_PRECISION_GEMM PSGEMM
#define scal_PRECISION_NRM2 PSNRM2
#define scal_PRECISION_LASET PSLASET
#define scal_PRECISION_GEMR2D PSGEMR2D
#define MPI_REAL_PRECISION MPI_REAL4
#define MPI_MATH_DATATYPE_PRECISION MPI_REAL4
#define MPI_MATH_DATATYPE_PRECISION_C MPI_FLOAT
......@@ -260,6 +263,7 @@
#undef scal_PRECISION_GEMM
#undef scal_PRECISION_DOTC
#undef scal_PRECISION_LASET
#undef scal_PRECISION_GEMR2D
#undef PRECISION_SUFFIX
#undef ELPA_IMPL_SUFFIX
#undef MPI_COMPLEX_PRECISION
......@@ -336,6 +340,7 @@
#define scal_PRECISION_GEMM PZGEMM
#define scal_PRECISION_DOTC PZDOTC
#define scal_PRECISION_LASET PZLASET
#define scal_PRECISION_GEMR2D PZGEMR2D
#define MPI_COMPLEX_PRECISION MPI_DOUBLE_COMPLEX
#define MPI_MATH_DATATYPE_PRECISION MPI_DOUBLE_COMPLEX
#define MPI_MATH_DATATYPE_PRECISION_C MPI_DOUBLE_COMPLEX
......@@ -408,6 +413,7 @@
#define scal_PRECISION_GEMM PCGEMM
#define scal_PRECISION_DOTC PCDOTC
#define scal_PRECISION_LASET PCLASET
#define scal_PRECISION_GEMR2D PCGEMR2D
#define MPI_COMPLEX_PRECISION MPI_COMPLEX
#define MPI_MATH_DATATYPE_PRECISION MPI_COMPLEX
#define MPI_MATH_DATATYPE_PRECISION_C MPI_COMPLEX
......
......@@ -140,7 +140,15 @@
end function
end interface
interface
subroutine pdgemr2d(m, n, a, ia, ja, desca, b, ib, jb, descb, ictxt)
use PRECISION_MODULE
implicit none
integer(kind=BLAS_KIND) :: m, n, ia, ja, desca(*), ib, jb, descb(*)
real(kind=rk8) :: a(*), b(*)
integer(kind=BLAS_KIND) :: ictxt
end subroutine
end interface
interface
......@@ -195,6 +203,16 @@
end function
end interface
interface
subroutine psgemr2d(m, n, a, ia, ja, desca, b, ib, jb, descb, ictxt)
use PRECISION_MODULE
implicit none
integer(kind=BLAS_KIND) :: m, n, ia, ja, desca(*), ib, jb, descb(*)
real(kind=rk4) :: a(*), b(*)
integer(kind=BLAS_KIND) :: ictxt
end subroutine
end interface
interface
subroutine pzgemm(TRANSA, TRANSB, M, N, K, ALPHA, A, IA, JA, DESCA, B, IB, JB, DESCB, BETA, C, IC, JC, DESCC)
use PRECISION_MODULE
......@@ -249,6 +267,16 @@
end function
end interface
interface
subroutine pzgemr2d(m, n, a, ia, ja, desca, b, ib, jb, descb, ictxt)
use PRECISION_MODULE
implicit none
integer(kind=BLAS_KIND) :: m, n, ia, ja, desca(*), ib, jb, descb(*)
complex(kind=ck8) :: a(*), b(*)
integer(kind=BLAS_KIND) :: ictxt
end subroutine
end interface
interface
subroutine pcgemm(TRANSA, TRANSB, M, N, K, ALPHA, A, IA, JA, DESCA, B, IB, JB, DESCB, BETA, C, IC, JC, DESCC)
use PRECISION_MODULE
......@@ -303,3 +331,12 @@
end function
end interface
interface
subroutine pcgemr2d(m, n, a, ia, ja, desca, b, ib, jb, descb, ictxt)
use PRECISION_MODULE
implicit none
integer(kind=BLAS_KIND) :: m, n, ia, ja, desca(*), ib, jb, descb(*)
complex(kind=ck4) :: a(*), b(*)
integer(kind=BLAS_KIND) :: ictxt
end subroutine
end interface
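These new interfaces expose ScaLAPACK's p?gemr2d, which copies a (sub)matrix from one block-cyclic distribution into another; that is the primitive the redistribution feature needs in order to try different internal block sizes. A hedged C sketch using ScaLAPACK's C wrapper Cpdgemr2d (prototype declared locally because ScaLAPACK ships no C header; BLACS grid setup and error handling omitted):

/* ScaLAPACK REDIST C wrapper for the double-precision real case. */
void Cpdgemr2d(int m, int n, double *a, int ia, int ja, int *desca,
               double *b, int ib, int jb, int *descb, int ictxt);

/* Copy the full na x na matrix A (descriptor desca, e.g. the user's
   block size) into B (descriptor descb, e.g. a candidate
   internal_nblk).  ictxt must be a BLACS context spanning all
   processes of both grids. */
void redistribute_matrix(int na, double *a, int *desca,
                         double *b, int *descb, int ictxt)
{
        Cpdgemr2d(na, na, a, 1, 1, desca, b, 1, 1, descb, ictxt);
}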