Commit 9c8470f1
Authored Jun 14, 2017 by Andreas Marek

Start to unify complex single/double AVX block 1 kernel

Parent: 285761e0
Changes: 4    Pipelines: 1

Showing 4 changed files with 791 additions and 1051 deletions
Makefile.am                                                                +1    -0
src/elpa2/kernels/elpa2_kernels_complex_avx-avx2_1hv_double_precision.c    +8    -518
src/elpa2/kernels/elpa2_kernels_complex_avx-avx2_1hv_single_precision.c    +8    -533
src/elpa2/kernels/elpa2_kernels_complex_avx-avx2_1hv_template.Xc           +774  -0
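The point of the commit is visible in the file summary: both precision-specific complex 1hv AVX kernel files shrink to a thin wrapper, and the shared body moves into the new elpa2_kernels_complex_avx-avx2_1hv_template.Xc, which each .c file includes after setting precision macros (the added lines appear in the diff below, prefixed with +). The one-file sketch that follows only mimics that instantiate-per-precision idea with a helper macro; DEFINE_SCALE_KERNEL and every name in it are illustrative, not ELPA code.

#include <stdio.h>

/* Stand-in for the .Xc template: the body is written once and instantiated per
 * precision. ELPA does this with #include plus DOUBLE_PRECISION / COMPLEXCASE
 * macros; a macro is used here only to keep the sketch in a single file. */
#define DEFINE_SCALE_KERNEL(SUFFIX, TYPE)                       \
    static void scale_##SUFFIX(TYPE *x, int n, TYPE alpha)      \
    {                                                           \
        for (int i = 0; i < n; i++)                             \
            x[i] *= alpha;                                      \
    }

DEFINE_SCALE_KERNEL(double, double)  /* "double precision" instantiation */
DEFINE_SCALE_KERNEL(single, float)   /* "single precision" instantiation  */

int main(void)
{
    double d[2] = {1.0, 2.0};
    float  s[2] = {1.0f, 2.0f};

    scale_double(d, 2, 3.0);
    scale_single(s, 2, 3.0f);

    printf("%g %g | %g %g\n", d[0], d[1], s[0], s[1]);
    return 0;
}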
Makefile.am (view file @ 9c8470f1)

...
@@ -597,6 +597,7 @@ EXTRA_DIST = \
  src/elpa2/kernels/elpa2_kernels_real_avx-avx2_6hv_template.Xc \
  src/elpa2/kernels/elpa2_kernels_complex_sse_1hv_template.Xc \
  src/elpa2/kernels/elpa2_kernels_complex_sse_2hv_template.Xc \
+ src/elpa2/kernels/elpa2_kernels_complex_avx-avx2_1hv_template.Xc \
  src/elpa2/redist_band.X90 \
  src/elpa2/pack_unpack_cpu.X90 \
  src/elpa2/pack_unpack_gpu.X90 \
...
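Every kernel in the double-precision file below builds complex products from the same three-step idiom: multiply by the broadcast imaginary part, swap the real/imaginary slots with _mm256_shuffle_pd(..., 0x5), then combine with _mm256_FMADDSUB_pd (plain product h*q) or _mm256_FMSUBADD_pd (conjugated product conj(h)*q, used while accumulating x); without FMA the same effect comes from _mm256_addsub_pd plus the sign-mask XOR. The standalone check below is illustrative only, not part of the commit, and assumes a compiler with AVX2/FMA enabled (e.g. gcc -O2 -mavx2 -mfma):

#include <complex.h>
#include <immintrin.h>
#include <stdio.h>

int main(void)
{
    double complex h    = 4.0 - 1.0*I;                  /* Householder coefficient   */
    double complex q[2] = {1.0 + 2.0*I, -3.0 + 0.5*I};  /* two packed complex values */
    double *h_dbl = (double*)&h;

    __m256d h_real = _mm256_broadcast_sd(&h_dbl[0]);
    __m256d h_imag = _mm256_broadcast_sd(&h_dbl[1]);
    __m256d vq     = _mm256_loadu_pd((double*)q);       /* [re0, im0, re1, im1] */

    /* tmp = h_im * q, then swap re/im inside each complex number (control 0x5). */
    __m256d tmp  = _mm256_mul_pd(h_imag, vq);
    __m256d swap = _mm256_shuffle_pd(tmp, tmp, 0x5);

    /* fmaddsub: even lanes h_re*q_re - h_im*q_im, odd lanes h_re*q_im + h_im*q_re -> h*q       */
    __m256d prod  = _mm256_fmaddsub_pd(h_real, vq, swap);
    /* fmsubadd: even lanes h_re*q_re + h_im*q_im, odd lanes h_re*q_im - h_im*q_re -> conj(h)*q */
    __m256d prodc = _mm256_fmsubadd_pd(h_real, vq, swap);

    double p[4], pc[4];
    _mm256_storeu_pd(p, prod);
    _mm256_storeu_pd(pc, prodc);

    for (int k = 0; k < 2; k++) {
        double complex r  = h * q[k];
        double complex rc = conj(h) * q[k];
        printf("h*q[%d]      : simd %g%+gi  ref %g%+gi\n", k, p[2*k],  p[2*k+1],  creal(r),  cimag(r));
        printf("conj(h)*q[%d]: simd %g%+gi  ref %g%+gi\n", k, pc[2*k], pc[2*k+1], creal(rc), cimag(rc));
    }
    return 0;
}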
src/elpa2/kernels/elpa2_kernels_complex_avx-avx2_1hv_double_precision.c (view file @ 9c8470f1)

...
@@ -42,524 +42,14 @@
// any derivatives of ELPA under the same license that we chose for
// the original distribution, the GNU Lesser General Public License.
//
//
// --------------------------------------------------------------------------------------------------
//
// This file contains the compute intensive kernels for the Householder transformations.
// It should be compiled with the highest possible optimization level.
//
// On Intel Nehalem or Intel Westmere or AMD Magny Cours use -O3 -msse3
// On Intel Sandy Bridge use -O3 -mavx
//
// Copyright of the original code rests with the authors inside the ELPA
// consortium. The copyright of any additional modifications shall rest
// with their original authors, but shall adhere to the licensing terms
// distributed along with the original code in the file "COPYING".
//
// Author: Alexander Heinecke (alexander.heinecke@mytum.de)
// Adapted for building a shared-library by Andreas Marek, MPCDF (andreas.marek@mpcdf.mpg.de)
// --------------------------------------------------------------------------------------------------
#include "config-f90.h"
#include <complex.h>
#include <x86intrin.h>
#define __forceinline __attribute__((always_inline))
#ifdef HAVE_AVX2
#ifdef __FMA4__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_pd(a,b,c) _mm256_maddsub_pd(a,b,c)
#define _mm256_FMSUBADD_pd(a,b,c) _mm256_msubadd_pd(a,b,c)
#endif
#ifdef __AVX2__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_pd(a,b,c) _mm256_fmaddsub_pd(a,b,c)
#define _mm256_FMSUBADD_pd(a,b,c) _mm256_fmsubadd_pd(a,b,c)
#endif
#endif
//Forward declaration
static __forceinline void hh_trafo_complex_kernel_12_AVX_1hv_double(double complex* q, double complex* hh, int nb, int ldq);
static __forceinline void hh_trafo_complex_kernel_8_AVX_1hv_double(double complex* q, double complex* hh, int nb, int ldq);
static __forceinline void hh_trafo_complex_kernel_4_AVX_1hv_double(double complex* q, double complex* hh, int nb, int ldq);
/*
!f>#if defined(HAVE_AVX) || defined(HAVE_AVX2)
!f> interface
!f> subroutine single_hh_trafo_complex_avx_avx2_1hv_double(q, hh, pnb, pnq, pldq) &
!f> bind(C, name="single_hh_trafo_complex_avx_avx2_1hv_double")
!f> use, intrinsic :: iso_c_binding
!f> integer(kind=c_int) :: pnb, pnq, pldq
!f> ! complex(kind=c_double_complex) :: q(*)
!f> type(c_ptr), value :: q
!f> complex(kind=c_double_complex) :: hh(pnb,2)
!f> end subroutine
!f> end interface
!f>#endif
*/
void single_hh_trafo_complex_avx_avx2_1hv_double(double complex* q, double complex* hh, int* pnb, int* pnq, int* pldq)
{
    int i;
    int nb = *pnb;
    int nq = *pldq;
    int ldq = *pldq;
    //int ldh = *pldh;

    for (i = 0; i < nq-8; i+=12)
    {
        hh_trafo_complex_kernel_12_AVX_1hv_double(&q[i], hh, nb, ldq);
    }
    if (nq == i)
    {
        return;
    }
    if (nq-i == 8)
    {
        hh_trafo_complex_kernel_8_AVX_1hv_double(&q[i], hh, nb, ldq);
    }
    else
    {
        hh_trafo_complex_kernel_4_AVX_1hv_double(&q[i], hh, nb, ldq);
    }
}
static __forceinline void hh_trafo_complex_kernel_12_AVX_1hv_double(double complex* q, double complex* hh, int nb, int ldq)
{
    double* q_dbl = (double*)q;
    double* hh_dbl = (double*)hh;

    __m256d x1, x2, x3, x4, x5, x6;
    __m256d q1, q2, q3, q4, q5, q6;
    __m256d h1_real, h1_imag;
    __m256d tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
    int i = 0;

    __m256d sign = (__m256d)_mm256_set_epi64x(0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000);

    x1 = _mm256_load_pd(&q_dbl[0]);
    x2 = _mm256_load_pd(&q_dbl[4]);
    x3 = _mm256_load_pd(&q_dbl[8]);
    x4 = _mm256_load_pd(&q_dbl[12]);
    x5 = _mm256_load_pd(&q_dbl[16]);
    x6 = _mm256_load_pd(&q_dbl[20]);

    for (i = 1; i < nb; i++)
    {
        h1_real = _mm256_broadcast_sd(&hh_dbl[i*2]);
        h1_imag = _mm256_broadcast_sd(&hh_dbl[(i*2)+1]);
#ifndef __ELPA_USE_FMA__
        // conjugate
        h1_imag = _mm256_xor_pd(h1_imag, sign);
#endif

        q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
        q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);
        q3 = _mm256_load_pd(&q_dbl[(2*i*ldq)+8]);
        q4 = _mm256_load_pd(&q_dbl[(2*i*ldq)+12]);
        q5 = _mm256_load_pd(&q_dbl[(2*i*ldq)+16]);
        q6 = _mm256_load_pd(&q_dbl[(2*i*ldq)+20]);

        tmp1 = _mm256_mul_pd(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
        x1 = _mm256_add_pd(x1, _mm256_FMSUBADD_pd(h1_real, q1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#else
        x1 = _mm256_add_pd(x1, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#endif
        tmp2 = _mm256_mul_pd(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
        x2 = _mm256_add_pd(x2, _mm256_FMSUBADD_pd(h1_real, q2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#else
        x2 = _mm256_add_pd(x2, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#endif
        tmp3 = _mm256_mul_pd(h1_imag, q3);
#ifdef __ELPA_USE_FMA__
        x3 = _mm256_add_pd(x3, _mm256_FMSUBADD_pd(h1_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#else
        x3 = _mm256_add_pd(x3, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#endif
        tmp4 = _mm256_mul_pd(h1_imag, q4);
#ifdef __ELPA_USE_FMA__
        x4 = _mm256_add_pd(x4, _mm256_FMSUBADD_pd(h1_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#else
        x4 = _mm256_add_pd(x4, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#endif
        tmp5 = _mm256_mul_pd(h1_imag, q5);
#ifdef __ELPA_USE_FMA__
        x5 = _mm256_add_pd(x5, _mm256_FMSUBADD_pd(h1_real, q5, _mm256_shuffle_pd(tmp5, tmp5, 0x5)));
#else
        x5 = _mm256_add_pd(x5, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q5), _mm256_shuffle_pd(tmp5, tmp5, 0x5)));
#endif
        tmp6 = _mm256_mul_pd(h1_imag, q6);
#ifdef __ELPA_USE_FMA__
        x6 = _mm256_add_pd(x6, _mm256_FMSUBADD_pd(h1_real, q6, _mm256_shuffle_pd(tmp6, tmp6, 0x5)));
#else
        x6 = _mm256_add_pd(x6, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q6), _mm256_shuffle_pd(tmp6, tmp6, 0x5)));
#endif
    }

    h1_real = _mm256_broadcast_sd(&hh_dbl[0]);
    h1_imag = _mm256_broadcast_sd(&hh_dbl[1]);
    h1_real = _mm256_xor_pd(h1_real, sign);
    h1_imag = _mm256_xor_pd(h1_imag, sign);

    tmp1 = _mm256_mul_pd(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
    x1 = _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5));
#else
    x1 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5));
#endif
    tmp2 = _mm256_mul_pd(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
    x2 = _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5));
#else
    x2 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5));
#endif
    tmp3 = _mm256_mul_pd(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
    x3 = _mm256_FMADDSUB_pd(h1_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0x5));
#else
    x3 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0x5));
#endif
    tmp4 = _mm256_mul_pd(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
    x4 = _mm256_FMADDSUB_pd(h1_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0x5));
#else
    x4 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0x5));
#endif
    tmp5 = _mm256_mul_pd(h1_imag, x5);
#ifdef __ELPA_USE_FMA__
    x5 = _mm256_FMADDSUB_pd(h1_real, x5, _mm256_shuffle_pd(tmp5, tmp5, 0x5));
#else
    x5 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x5), _mm256_shuffle_pd(tmp5, tmp5, 0x5));
#endif
    tmp6 = _mm256_mul_pd(h1_imag, x6);
#ifdef __ELPA_USE_FMA__
    x6 = _mm256_FMADDSUB_pd(h1_real, x6, _mm256_shuffle_pd(tmp6, tmp6, 0x5));
#else
    x6 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x6), _mm256_shuffle_pd(tmp6, tmp6, 0x5));
#endif

    q1 = _mm256_load_pd(&q_dbl[0]);
    q2 = _mm256_load_pd(&q_dbl[4]);
    q3 = _mm256_load_pd(&q_dbl[8]);
    q4 = _mm256_load_pd(&q_dbl[12]);
    q5 = _mm256_load_pd(&q_dbl[16]);
    q6 = _mm256_load_pd(&q_dbl[20]);

    q1 = _mm256_add_pd(q1, x1);
    q2 = _mm256_add_pd(q2, x2);
    q3 = _mm256_add_pd(q3, x3);
    q4 = _mm256_add_pd(q4, x4);
    q5 = _mm256_add_pd(q5, x5);
    q6 = _mm256_add_pd(q6, x6);

    _mm256_store_pd(&q_dbl[0], q1);
    _mm256_store_pd(&q_dbl[4], q2);
    _mm256_store_pd(&q_dbl[8], q3);
    _mm256_store_pd(&q_dbl[12], q4);
    _mm256_store_pd(&q_dbl[16], q5);
    _mm256_store_pd(&q_dbl[20], q6);

    for (i = 1; i < nb; i++)
    {
        h1_real = _mm256_broadcast_sd(&hh_dbl[i*2]);
        h1_imag = _mm256_broadcast_sd(&hh_dbl[(i*2)+1]);

        q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
        q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);
        q3 = _mm256_load_pd(&q_dbl[(2*i*ldq)+8]);
        q4 = _mm256_load_pd(&q_dbl[(2*i*ldq)+12]);
        q5 = _mm256_load_pd(&q_dbl[(2*i*ldq)+16]);
        q6 = _mm256_load_pd(&q_dbl[(2*i*ldq)+20]);

+ // Author: Andreas Marek, MPCDF

        tmp1 = _mm256_mul_pd(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
        q1 = _mm256_add_pd(q1, _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#else
        q1 = _mm256_add_pd(q1, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#endif
        tmp2 = _mm256_mul_pd(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
        q2 = _mm256_add_pd(q2, _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#else
        q2 = _mm256_add_pd(q2, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#endif
        tmp3 = _mm256_mul_pd(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
        q3 = _mm256_add_pd(q3, _mm256_FMADDSUB_pd(h1_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#else
        q3 = _mm256_add_pd(q3, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#endif
        tmp4 = _mm256_mul_pd(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
        q4 = _mm256_add_pd(q4, _mm256_FMADDSUB_pd(h1_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#else
        q4 = _mm256_add_pd(q4, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#endif
        tmp5 = _mm256_mul_pd(h1_imag, x5);
#ifdef __ELPA_USE_FMA__
        q5 = _mm256_add_pd(q5, _mm256_FMADDSUB_pd(h1_real, x5, _mm256_shuffle_pd(tmp5, tmp5, 0x5)));
#else
        q5 = _mm256_add_pd(q5, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x5), _mm256_shuffle_pd(tmp5, tmp5, 0x5)));
#endif
        tmp6 = _mm256_mul_pd(h1_imag, x6);
#ifdef __ELPA_USE_FMA__
        q6 = _mm256_add_pd(q6, _mm256_FMADDSUB_pd(h1_real, x6, _mm256_shuffle_pd(tmp6, tmp6, 0x5)));
#else
        q6 = _mm256_add_pd(q6, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x6), _mm256_shuffle_pd(tmp6, tmp6, 0x5)));
#endif

        _mm256_store_pd(&q_dbl[(2*i*ldq)+0], q1);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+4], q2);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+8], q3);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+12], q4);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+16], q5);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+20], q6);
    }
}
static __forceinline void hh_trafo_complex_kernel_8_AVX_1hv_double(double complex* q, double complex* hh, int nb, int ldq)
{
    double* q_dbl = (double*)q;
    double* hh_dbl = (double*)hh;

    __m256d x1, x2, x3, x4;
    __m256d q1, q2, q3, q4;
    __m256d h1_real, h1_imag;
    __m256d tmp1, tmp2, tmp3, tmp4;
    int i = 0;

    __m256d sign = (__m256d)_mm256_set_epi64x(0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000);

    x1 = _mm256_load_pd(&q_dbl[0]);
    x2 = _mm256_load_pd(&q_dbl[4]);
    x3 = _mm256_load_pd(&q_dbl[8]);
    x4 = _mm256_load_pd(&q_dbl[12]);

    for (i = 1; i < nb; i++)
    {
        h1_real = _mm256_broadcast_sd(&hh_dbl[i*2]);
        h1_imag = _mm256_broadcast_sd(&hh_dbl[(i*2)+1]);
#ifndef __ELPA_USE_FMA__
        // conjugate
        h1_imag = _mm256_xor_pd(h1_imag, sign);
#endif

        q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
        q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);
        q3 = _mm256_load_pd(&q_dbl[(2*i*ldq)+8]);
        q4 = _mm256_load_pd(&q_dbl[(2*i*ldq)+12]);

        tmp1 = _mm256_mul_pd(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
        x1 = _mm256_add_pd(x1, _mm256_FMSUBADD_pd(h1_real, q1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#else
        x1 = _mm256_add_pd(x1, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#endif
        tmp2 = _mm256_mul_pd(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
        x2 = _mm256_add_pd(x2, _mm256_FMSUBADD_pd(h1_real, q2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#else
        x2 = _mm256_add_pd(x2, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#endif
        tmp3 = _mm256_mul_pd(h1_imag, q3);
#ifdef __ELPA_USE_FMA__
        x3 = _mm256_add_pd(x3, _mm256_FMSUBADD_pd(h1_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#else
        x3 = _mm256_add_pd(x3, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#endif
        tmp4 = _mm256_mul_pd(h1_imag, q4);
#ifdef __ELPA_USE_FMA__
        x4 = _mm256_add_pd(x4, _mm256_FMSUBADD_pd(h1_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#else
        x4 = _mm256_add_pd(x4, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#endif
    }

    h1_real = _mm256_broadcast_sd(&hh_dbl[0]);
    h1_imag = _mm256_broadcast_sd(&hh_dbl[1]);
    h1_real = _mm256_xor_pd(h1_real, sign);
    h1_imag = _mm256_xor_pd(h1_imag, sign);

    tmp1 = _mm256_mul_pd(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
    x1 = _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5));
#else
    x1 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5));
#endif
    tmp2 = _mm256_mul_pd(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
    x2 = _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5));
#else
    x2 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5));
#endif
    tmp3 = _mm256_mul_pd(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
    x3 = _mm256_FMADDSUB_pd(h1_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0x5));
#else
    x3 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0x5));
#endif
    tmp4 = _mm256_mul_pd(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
    x4 = _mm256_FMADDSUB_pd(h1_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0x5));
#else
    x4 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0x5));
#endif

    q1 = _mm256_load_pd(&q_dbl[0]);
    q2 = _mm256_load_pd(&q_dbl[4]);
    q3 = _mm256_load_pd(&q_dbl[8]);
    q4 = _mm256_load_pd(&q_dbl[12]);

    q1 = _mm256_add_pd(q1, x1);
    q2 = _mm256_add_pd(q2, x2);
    q3 = _mm256_add_pd(q3, x3);
    q4 = _mm256_add_pd(q4, x4);

    _mm256_store_pd(&q_dbl[0], q1);
    _mm256_store_pd(&q_dbl[4], q2);
    _mm256_store_pd(&q_dbl[8], q3);
    _mm256_store_pd(&q_dbl[12], q4);

    for (i = 1; i < nb; i++)
    {
        h1_real = _mm256_broadcast_sd(&hh_dbl[i*2]);
        h1_imag = _mm256_broadcast_sd(&hh_dbl[(i*2)+1]);

        q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
        q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);
        q3 = _mm256_load_pd(&q_dbl[(2*i*ldq)+8]);
        q4 = _mm256_load_pd(&q_dbl[(2*i*ldq)+12]);

        tmp1 = _mm256_mul_pd(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
        q1 = _mm256_add_pd(q1, _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#else
        q1 = _mm256_add_pd(q1, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#endif
        tmp2 = _mm256_mul_pd(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
        q2 = _mm256_add_pd(q2, _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#else
        q2 = _mm256_add_pd(q2, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#endif
        tmp3 = _mm256_mul_pd(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
        q3 = _mm256_add_pd(q3, _mm256_FMADDSUB_pd(h1_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#else
        q3 = _mm256_add_pd(q3, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
#endif
        tmp4 = _mm256_mul_pd(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
        q4 = _mm256_add_pd(q4, _mm256_FMADDSUB_pd(h1_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#else
        q4 = _mm256_add_pd(q4, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
#endif

        _mm256_store_pd(&q_dbl[(2*i*ldq)+0], q1);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+4], q2);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+8], q3);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+12], q4);
    }
}
static __forceinline void hh_trafo_complex_kernel_4_AVX_1hv_double(double complex* q, double complex* hh, int nb, int ldq)
{
    double* q_dbl = (double*)q;
    double* hh_dbl = (double*)hh;

    __m256d x1, x2;
    __m256d q1, q2;
    __m256d h1_real, h1_imag;
    __m256d tmp1, tmp2;
    int i = 0;

    __m256d sign = (__m256d)_mm256_set_epi64x(0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000);

    x1 = _mm256_load_pd(&q_dbl[0]);
    x2 = _mm256_load_pd(&q_dbl[4]);

    for (i = 1; i < nb; i++)
    {
        h1_real = _mm256_broadcast_sd(&hh_dbl[i*2]);
        h1_imag = _mm256_broadcast_sd(&hh_dbl[(i*2)+1]);
#ifndef __ELPA_USE_FMA__
        // conjugate
        h1_imag = _mm256_xor_pd(h1_imag, sign);
#endif

        q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
        q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);

        tmp1 = _mm256_mul_pd(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
        x1 = _mm256_add_pd(x1, _mm256_FMSUBADD_pd(h1_real, q1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#else
        x1 = _mm256_add_pd(x1, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#endif
        tmp2 = _mm256_mul_pd(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
        x2 = _mm256_add_pd(x2, _mm256_FMSUBADD_pd(h1_real, q2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#else
        x2 = _mm256_add_pd(x2, _mm256_addsub_pd(_mm256_mul_pd(h1_real, q2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#endif
    }

    h1_real = _mm256_broadcast_sd(&hh_dbl[0]);
    h1_imag = _mm256_broadcast_sd(&hh_dbl[1]);
    h1_real = _mm256_xor_pd(h1_real, sign);
    h1_imag = _mm256_xor_pd(h1_imag, sign);

    tmp1 = _mm256_mul_pd(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
    x1 = _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5));
#else
    x1 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5));
#endif
    tmp2 = _mm256_mul_pd(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
    x2 = _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5));
#else
    x2 = _mm256_addsub_pd(_mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5));
#endif

    q1 = _mm256_load_pd(&q_dbl[0]);
    q2 = _mm256_load_pd(&q_dbl[4]);

    q1 = _mm256_add_pd(q1, x1);
    q2 = _mm256_add_pd(q2, x2);

    _mm256_store_pd(&q_dbl[0], q1);
    _mm256_store_pd(&q_dbl[4], q2);

    for (i = 1; i < nb; i++)
    {
        h1_real = _mm256_broadcast_sd(&hh_dbl[i*2]);
        h1_imag = _mm256_broadcast_sd(&hh_dbl[(i*2)+1]);

        q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
        q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);

+ #include "config-f90.h"

        tmp1 = _mm256_mul_pd(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
        q1 = _mm256_add_pd(q1, _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#else
        q1 = _mm256_add_pd(q1, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
#endif
        tmp2 = _mm256_mul_pd(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
        q2 = _mm256_add_pd(q2, _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#else
        q2 = _mm256_add_pd(q2, _mm256_addsub_pd(_mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
#endif

+ #define COMPLEXCASE 1
+ #define DOUBLE_PRECISION 1
+ #include "../../general/precision_macros.h"
+ #include "elpa2_kernels_complex_avx-avx2_1hv_template.Xc"
+ #undef DOUBLE_PRECISION
+ #undef COMPLEXCASE

        _mm256_store_pd(&q_dbl[(2*i*ldq)+0], q1);
        _mm256_store_pd(&q_dbl[(2*i*ldq)+4], q2);
    }
}
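Stripped of the SIMD packing, each of the removed kernels above applies one Householder reflector column by column: accumulate x = v^H q(:,j) with v = (1, hh[1], ..., hh[nb-1]), scale by -hh[0] (the tau factor), and add v*x back onto the column. The scalar model below restates that logic as read off the intrinsics; the function name, driver, and test data are illustrative assumptions, not ELPA code.

#include <complex.h>
#include <stdio.h>

/* Scalar model of the 1hv kernels: one Householder update per column of q. */
static void hh_trafo_complex_scalar_1hv(double complex *q, double complex *hh,
                                        int nb, int nq, int ldq)
{
    for (int j = 0; j < nq; j++) {
        double complex x = q[j];
        for (int i = 1; i < nb; i++)
            x += conj(hh[i]) * q[i*ldq + j];      /* x = v^H * q(:,j)            */

        x *= -hh[0];                              /* hh[0] plays the role of tau */

        q[j] += x;
        for (int i = 1; i < nb; i++)
            q[i*ldq + j] += hh[i] * x;            /* q(:,j) += v * x             */
    }
}

int main(void)
{
    enum { NB = 3, NQ = 4, LDQ = 4 };
    double complex q[NB*LDQ];
    double complex hh[NB] = {0.5, 0.25 - 0.25*I, -0.1 + 0.3*I};

    for (int k = 0; k < NB*LDQ; k++)
        q[k] = (k % 5) + (k % 3)*I;

    hh_trafo_complex_scalar_1hv(q, hh, NB, NQ, LDQ);

    for (int k = 0; k < NQ; k++)
        printf("q[0][%d] = %g%+gi\n", k, creal(q[k]), cimag(q[k]));
    return 0;
}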
src/elpa2/kernels/elpa2_kernels_complex_avx-avx2_1hv_single_precision.c (view file @ 9c8470f1)

...
@@ -42,539 +42,14 @@
// any derivatives of ELPA under the same license that we chose for
// the original distribution, the GNU Lesser General Public License.
//
// Author: Andreas Marek, MPCDF, based on the double precision case of A. Heinecke
//
#include "config-f90.h"
#include <complex.h>
#include <x86intrin.h>
#define __forceinline __attribute__((always_inline))
#ifdef HAVE_AVX2
#ifdef __FMA4__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_ps(a,b,c) _mm256_maddsub_ps(a,b,c)
#define _mm256_FMSUBADD_ps(a,b,c) _mm256_msubadd_ps(a,b,c)
#endif
#ifdef __AVX2__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_ps(a,b,c) _mm256_fmaddsub_ps(a,b,c)
#define _mm256_FMSUBADD_ps(a,b,c) _mm256_fmsubadd_ps(a,b,c)
#endif
#endif
//Forward declaration
static __forceinline void hh_trafo_complex_kernel_12_AVX_1hv_single(float complex* q, float complex* hh, int nb, int ldq);
static __forceinline void hh_trafo_complex_kernel_8_AVX_1hv_single(float complex* q, float complex* hh, int nb, int ldq);
static __forceinline void hh_trafo_complex_kernel_4_AVX_1hv_single(float complex* q, float complex* hh, int nb, int ldq);
/*
!f>#if defined(HAVE_AVX) || defined(HAVE_AVX2)
!f> interface
!f> subroutine single_hh_trafo_complex_avx_avx2_1hv_single(q, hh, pnb, pnq, pldq) &
!f> bind(C, name="single_hh_trafo_complex_avx_avx2_1hv_single")
!f> use, intrinsic :: iso_c_binding
!f> integer(kind=c_int) :: pnb, pnq, pldq
!f> ! complex(kind=c_float_complex) :: q(*)
!f> type(c_ptr), value :: q
!f> complex(kind=c_float_complex) :: hh(pnb,2)