Commit 0f665949 authored by Andreas Marek's avatar Andreas Marek

Single precision AVX/AVX2 BLOCK2 complex kernel

parent 9b5a1df8
@@ -70,14 +70,14 @@
#ifdef __FMA4__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_pd(a,b,c) _mm256_maddsub_pd(a,b,c)
#define _mm256_FMSUBADD_pd(a,b,c) _mm256_msubadd_pd(a,b,c)
#define _mm256_FMADDSUB_ps(a,b,c) _mm256_maddsub_ps(a,b,c)
#define _mm256_FMSUBADD_ps(a,b,c) _mm256_msubadd_ps(a,b,c)
#endif
#ifdef __AVX2__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_pd(a,b,c) _mm256_fmaddsub_pd(a,b,c)
#define _mm256_FMSUBADD_pd(a,b,c) _mm256_fmsubadd_pd(a,b,c)
#define _mm256_FMADDSUB_ps(a,b,c) _mm256_fmaddsub_ps(a,b,c)
#define _mm256_FMSUBADD_ps(a,b,c) _mm256_fmsubadd_ps(a,b,c)
#endif
#endif
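// Both FMA flavours are mapped onto common names so the kernel body is written only once:
// _mm256_FMADDSUB_*(a,b,c) yields a*b-c in even lanes and a*b+c in odd lanes, while
// _mm256_FMSUBADD_*(a,b,c) does the opposite. With interleaved [re,im] storage these are
// exactly the sign patterns of a complex product (FMADDSUB) and of a product with a
// conjugated factor (FMSUBADD), as used by the kernels below.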
@@ -85,110 +85,12 @@
extern "C" {
//Forward declaration
static __forceinline void hh_trafo_complex_kernel_8_AVX_2hv_single(std::complex<double>* q, std::complex<double>* hh, int nb, int ldq, int ldh, std::complex<double> s);
static __forceinline void hh_trafo_complex_kernel_6_AVX_2hv_single(std::complex<double>* q, std::complex<double>* hh, int nb, int ldq, int ldh, std::complex<double> s);
static __forceinline void hh_trafo_complex_kernel_4_AVX_2hv_single(std::complex<double>* q, std::complex<double>* hh, int nb, int ldq, int ldh, std::complex<double> s);
static __forceinline void hh_trafo_complex_kernel_2_AVX_2hv_single(std::complex<double>* q, std::complex<double>* hh, int nb, int ldq, int ldh, std::complex<double> s);
static __forceinline void hh_trafo_complex_kernel_8_AVX_2hv_single(std::complex<float>* q, std::complex<float>* hh, int nb, int ldq, int ldh, std::complex<float> s, std::complex<float> s1);
//static __forceinline void hh_trafo_complex_kernel_6_AVX_2hv_single(std::complex<float>* q, std::complex<float>* hh, int nb, int ldq, int ldh, std::complex<float> s, std::complex<float> s1);
static __forceinline void hh_trafo_complex_kernel_4_AVX_2hv_single(std::complex<float>* q, std::complex<float>* hh, int nb, int ldq, int ldh, std::complex<float> s, std::complex<float> s1);
//static __forceinline void hh_trafo_complex_kernel_2_AVX_2hv_single(std::complex<float>* q, std::complex<float>* hh, int nb, int ldq, int ldh, std::complex<float> s, std::complex<float> s1);
#if 0
static __forceinline void hh_trafo_complex_kernel_4_C_2hv_single(std::complex<double>* q, std::complex<double>* hh, int nb, int ldq, int ldh, std::complex<double> s)
{
std::complex<double> x1;
std::complex<double> x2;
std::complex<double> x3;
std::complex<double> x4;
std::complex<double> y1;
std::complex<double> y2;
std::complex<double> y3;
std::complex<double> y4;
std::complex<double> h1;
std::complex<double> h2;
std::complex<double> tau1;
std::complex<double> tau2;
int i=0;
x1 = q[ldq+0];
x2 = q[ldq+1];
x3 = q[ldq+2];
x4 = q[ldq+3];
h2 = conj(hh[ldh+1]);
y1 = q[0] + (x1*h2);
y2 = q[1] + (x2*h2);
y3 = q[2] + (x3*h2);
y4 = q[3] + (x4*h2);
for (i = 2; i < nb; i++)
{
h1 = conj(hh[i-1]);
h2 = conj(hh[ldh+i]);
x1 += (q[(i*ldq)+0] * h1);
y1 += (q[(i*ldq)+0] * h2);
x2 += (q[(i*ldq)+1] * h1);
y2 += (q[(i*ldq)+1] * h2);
x3 += (q[(i*ldq)+2] * h1);
y3 += (q[(i*ldq)+2] * h2);
x4 += (q[(i*ldq)+3] * h1);
y4 += (q[(i*ldq)+3] * h2);
}
h1 = conj(hh[nb-1]);
x1 += (q[(nb*ldq)+0] * h1);
x2 += (q[(nb*ldq)+1] * h1);
x3 += (q[(nb*ldq)+2] * h1);
x4 += (q[(nb*ldq)+3] * h1);
tau1 = hh[0];
tau2 = hh[ldh];
h1 = (-1.0)*tau1;
x1 *= h1;
x2 *= h1;
x3 *= h1;
x4 *= h1;
h1 = (-1.0)*tau2;
h2 = (-1.0)*tau2;
h2 *= s;
y1 = y1*h1 +x1*h2;
y2 = y2*h1 +x2*h2;
y3 = y3*h1 +x3*h2;
y4 = y4*h1 +x4*h2;
q[0] += y1;
q[1] += y2;
q[2] += y3;
q[3] += y4;
h2 = hh[ldh+1];
q[ldq+0] += (x1 + (y1*h2));
q[ldq+1] += (x2 + (y2*h2));
q[ldq+2] += (x3 + (y3*h2));
q[ldq+3] += (x4 + (y4*h2));
for (i = 2; i < nb; i++)
{
h1 = hh[i-1];
h2 = hh[ldh+i];
q[(i*ldq)+0] += ((x1*h1) + (y1*h2));
q[(i*ldq)+1] += ((x2*h1) + (y2*h2));
q[(i*ldq)+2] += ((x3*h1) + (y3*h2));
q[(i*ldq)+3] += ((x4*h1) + (y4*h2));
}
h1 = hh[nb-1];
q[(nb*ldq)+0] += (x1*h1);
q[(nb*ldq)+1] += (x2*h1);
q[(nb*ldq)+2] += (x3*h1);
q[(nb*ldq)+3] += (x4*h1);
}
#endif
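// The disabled #if 0 block above is the plain C++ reference for the rank-2 (two Householder
// vector) update; the AVX kernels below carry out the same arithmetic on packed [re,im] pairs.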
void double_hh_trafo_complex_avx_avx2_2hv_single_(std::complex<double>* q, std::complex<double>* hh, int* pnb, int* pnq, int* pldq, int* pldh)
void double_hh_trafo_complex_avx_avx2_2hv_single_(std::complex<float>* q, std::complex<float>* hh, int* pnb, int* pnq, int* pldq, int* pldh)
{
int i;
int nb = *pnb;
@@ -196,1286 +98,737 @@ void double_hh_trafo_complex_avx_avx2_2hv_single_(std::complex<double>* q, std::
int ldq = *pldq;
int ldh = *pldh;
std::complex<double> s = conj(hh[(ldh)+1])*1.0;
std::complex<float> s = conj(hh[(ldh)+1])*1.0f;
for (i = 2; i < nb; i++)
{
s += hh[i-1] * conj(hh[(i+ldh)]);
}
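// s accumulates the overlap of the two Householder vectors,
//   s = conj(hh[ldh+1]) + sum_{i=2..nb-1} hh[i-1] * conj(hh[ldh+i]),
// which couples the x and y updates in the rank-2 application performed by the kernels.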
#if 1
for (i = 0; i < nq-4; i+=8)
{
hh_trafo_complex_kernel_8_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s);
hh_trafo_complex_kernel_8_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s , s);
}
if (nq-i > 0)
{
hh_trafo_complex_kernel_4_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s);
hh_trafo_complex_kernel_4_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s, s);
}
#else
for (i = 0; i < nq-4; i+=6)
{
hh_trafo_complex_kernel_6_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s);
}
if (nq-i > 2)
{
hh_trafo_complex_kernel_4_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s);
}
else if (nq-i > 0)
{
hh_trafo_complex_kernel_2_AVX_2hv_single(&q[i], hh, nb, ldq, ldh, s);
}
#endif
}
static __forceinline void hh_trafo_complex_kernel_8_AVX_2hv_single(std::complex<double>* q, std::complex<double>* hh, int nb, int ldq, int ldh, std::complex<double> s)
static __forceinline void hh_trafo_complex_kernel_8_AVX_2hv_single(std::complex<float>* q, std::complex<float>* hh, int nb, int ldq, int ldh, std::complex<float> s, std::complex<float> s1)
{
double* q_dbl = (double*)q;
double* hh_dbl = (double*)hh;
double* s_dbl = (double*)(&s);
__m256d x1, x2, x3, x4;
__m256d y1, y2, y3, y4;
__m256d q1, q2, q3, q4;
__m256d h1_real, h1_imag, h2_real, h2_imag;
__m256d tmp1, tmp2, tmp3, tmp4;
int i=0;
__m256d sign = (__m256d)_mm256_set_epi64x(0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000);
x1 = _mm256_load_pd(&q_dbl[(2*ldq)+0]);
x2 = _mm256_load_pd(&q_dbl[(2*ldq)+4]);
x3 = _mm256_load_pd(&q_dbl[(2*ldq)+8]);
x4 = _mm256_load_pd(&q_dbl[(2*ldq)+12]);
h2_real = _mm256_broadcast_sd(&hh_dbl[(ldh+1)*2]);
h2_imag = _mm256_broadcast_sd(&hh_dbl[((ldh+1)*2)+1]);
float* q_dbl = (float*)q;
float* hh_dbl = (float*)hh;
float* s_dbl = (float*)(&s);
__m256 x1, x2, x3, x4;
__m256 y1, y2, y3, y4;
__m256 q1, q2, q3, q4;
__m256 h1_real, h1_imag, h2_real, h2_imag;
__m256 tmp1, tmp2, tmp3, tmp4;
volatile int i=0;
__m256 sign = (__m256)_mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
x1 = _mm256_load_ps(&q_dbl[(2*ldq)+0]);
x2 = _mm256_load_ps(&q_dbl[(2*ldq)+8]);
// x3 = _mm256_load_ps(&q_dbl[(2*ldq)+8]);
// x4 = _mm256_load_ps(&q_dbl[(2*ldq)+12]);
h2_real = _mm256_broadcast_ss(&hh_dbl[(ldh+1)*2]);
h2_imag = _mm256_broadcast_ss(&hh_dbl[((ldh+1)*2)+1]);
#ifndef __ELPA_USE_FMA__
// conjugate
h2_imag = _mm256_xor_pd(h2_imag, sign);
h2_imag = _mm256_xor_ps(h2_imag, sign);
#endif
y1 = _mm256_load_pd(&q_dbl[0]);
y2 = _mm256_load_pd(&q_dbl[4]);
y3 = _mm256_load_pd(&q_dbl[8]);
y4 = _mm256_load_pd(&q_dbl[12]);
y1 = _mm256_load_ps(&q_dbl[0]);
y2 = _mm256_load_ps(&q_dbl[8]);
// y3 = _mm256_load_pd(&q_dbl[8]);
// y4 = _mm256_load_pd(&q_dbl[12]);
tmp1 = _mm256_mul_pd(h2_imag, x1);
tmp1 = _mm256_mul_ps(h2_imag, x1);
#ifdef __ELPA_USE_FMA__
y1 = _mm256_add_pd(y1, _mm256_FMSUBADD_pd(h2_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
y1 = _mm256_add_ps(y1, _mm256_FMSUBADD_ps(h2_real, x1, _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#else
y1 = _mm256_add_pd(y1, _mm256_addsub_pd( _mm256_mul_pd(h2_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
y1 = _mm256_add_ps(y1, _mm256_addsub_ps( _mm256_mul_ps(h2_real, x1), _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#endif
tmp2 = _mm256_mul_pd(h2_imag, x2);
tmp2 = _mm256_mul_ps(h2_imag, x2);
#ifdef __ELPA_USE_FMA__
y2 = _mm256_add_pd(y2, _mm256_FMSUBADD_pd(h2_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
y2 = _mm256_add_ps(y2, _mm256_FMSUBADD_ps(h2_real, x2, _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#else
y2 = _mm256_add_pd(y2, _mm256_addsub_pd( _mm256_mul_pd(h2_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
y2 = _mm256_add_ps(y2, _mm256_addsub_ps( _mm256_mul_ps(h2_real, x2), _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#endif
tmp3 = _mm256_mul_pd(h2_imag, x3);
// tmp3 = _mm256_mul_pd(h2_imag, x3);
#ifdef __ELPA_USE_FMA__
y3 = _mm256_add_pd(y3, _mm256_FMSUBADD_pd(h2_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// y3 = _mm256_add_pd(y3, _mm256_FMSUBADD_pd(h2_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#else
y3 = _mm256_add_pd(y3, _mm256_addsub_pd( _mm256_mul_pd(h2_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// y3 = _mm256_add_pd(y3, _mm256_addsub_pd( _mm256_mul_pd(h2_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#endif
tmp4 = _mm256_mul_pd(h2_imag, x4);
// tmp4 = _mm256_mul_pd(h2_imag, x4);
#ifdef __ELPA_USE_FMA__
y4 = _mm256_add_pd(y4, _mm256_FMSUBADD_pd(h2_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// y4 = _mm256_add_pd(y4, _mm256_FMSUBADD_pd(h2_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#else
y4 = _mm256_add_pd(y4, _mm256_addsub_pd( _mm256_mul_pd(h2_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// y4 = _mm256_add_pd(y4, _mm256_addsub_pd( _mm256_mul_pd(h2_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#endif
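// The block above is the packed form of y += conj(h2)*x for interleaved [re,im] data:
// tmp = h2_imag*x, the shuffle swaps re/im within each complex pair (imm 0xb1 for _ps,
// 0x5 for _pd), and FMSUBADD / addsub apply the (+,-) sign pattern of the conjugated
// product. The same idiom recurs for every x/y update in this kernel.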
for (i = 2; i < nb; i++)
{
q1 = _mm256_load_pd(&q_dbl[(2*i*ldq)+0]);
q2 = _mm256_load_pd(&q_dbl[(2*i*ldq)+4]);
q3 = _mm256_load_pd(&q_dbl[(2*i*ldq)+8]);
q4 = _mm256_load_pd(&q_dbl[(2*i*ldq)+12]);
q1 = _mm256_load_ps(&q_dbl[(2*i*ldq)+0]);
q2 = _mm256_load_ps(&q_dbl[(2*i*ldq)+8]);
// q3 = _mm256_load_pd(&q_dbl[(2*i*ldq)+8]);
// q4 = _mm256_load_pd(&q_dbl[(2*i*ldq)+12]);
h1_real = _mm256_broadcast_sd(&hh_dbl[(i-1)*2]);
h1_imag = _mm256_broadcast_sd(&hh_dbl[((i-1)*2)+1]);
h1_real = _mm256_broadcast_ss(&hh_dbl[(i-1)*2]);
h1_imag = _mm256_broadcast_ss(&hh_dbl[((i-1)*2)+1]);
#ifndef __ELPA_USE_FMA__
// conjugate
h1_imag = _mm256_xor_pd(h1_imag, sign);
h1_imag = _mm256_xor_ps(h1_imag, sign);
#endif
tmp1 = _mm256_mul_pd(h1_imag, q1);
tmp1 = _mm256_mul_ps(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
x1 = _mm256_add_pd(x1, _mm256_FMSUBADD_pd(h1_real, q1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
x1 = _mm256_add_ps(x1, _mm256_FMSUBADD_ps(h1_real, q1, _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#else
x1 = _mm256_add_pd(x1, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
x1 = _mm256_add_ps(x1, _mm256_addsub_ps( _mm256_mul_ps(h1_real, q1), _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#endif
tmp2 = _mm256_mul_pd(h1_imag, q2);
tmp2 = _mm256_mul_ps(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
x2 = _mm256_add_pd(x2, _mm256_FMSUBADD_pd(h1_real, q2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
x2 = _mm256_add_ps(x2, _mm256_FMSUBADD_ps(h1_real, q2, _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#else
x2 = _mm256_add_pd(x2, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
x2 = _mm256_add_ps(x2, _mm256_addsub_ps( _mm256_mul_ps(h1_real, q2), _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#endif
tmp3 = _mm256_mul_pd(h1_imag, q3);
// tmp3 = _mm256_mul_pd(h1_imag, q3);
#ifdef __ELPA_USE_FMA__
x3 = _mm256_add_pd(x3, _mm256_FMSUBADD_pd(h1_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// x3 = _mm256_add_pd(x3, _mm256_FMSUBADD_pd(h1_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#else
x3 = _mm256_add_pd(x3, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// x3 = _mm256_add_pd(x3, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#endif
tmp4 = _mm256_mul_pd(h1_imag, q4);
// tmp4 = _mm256_mul_pd(h1_imag, q4);
#ifdef __ELPA_USE_FMA__
x4 = _mm256_add_pd(x4, _mm256_FMSUBADD_pd(h1_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// x4 = _mm256_add_pd(x4, _mm256_FMSUBADD_pd(h1_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#else
x4 = _mm256_add_pd(x4, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// x4 = _mm256_add_pd(x4, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#endif
h2_real = _mm256_broadcast_sd(&hh_dbl[(ldh+i)*2]);
h2_imag = _mm256_broadcast_sd(&hh_dbl[((ldh+i)*2)+1]);
h2_real = _mm256_broadcast_ss(&hh_dbl[(ldh+i)*2]);
h2_imag = _mm256_broadcast_ss(&hh_dbl[((ldh+i)*2)+1]);
#ifndef __ELPA_USE_FMA__
// conjugate
h2_imag = _mm256_xor_pd(h2_imag, sign);
h2_imag = _mm256_xor_ps(h2_imag, sign);
#endif
tmp1 = _mm256_mul_pd(h2_imag, q1);
tmp1 = _mm256_mul_ps(h2_imag, q1);
#ifdef __ELPA_USE_FMA__
y1 = _mm256_add_pd(y1, _mm256_FMSUBADD_pd(h2_real, q1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
y1 = _mm256_add_ps(y1, _mm256_FMSUBADD_ps(h2_real, q1, _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#else
y1 = _mm256_add_pd(y1, _mm256_addsub_pd( _mm256_mul_pd(h2_real, q1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
y1 = _mm256_add_ps(y1, _mm256_addsub_ps( _mm256_mul_ps(h2_real, q1), _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#endif
tmp2 = _mm256_mul_pd(h2_imag, q2);
tmp2 = _mm256_mul_ps(h2_imag, q2);
#ifdef __ELPA_USE_FMA__
y2 = _mm256_add_pd(y2, _mm256_FMSUBADD_pd(h2_real, q2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
y2 = _mm256_add_ps(y2, _mm256_FMSUBADD_ps(h2_real, q2, _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#else
y2 = _mm256_add_pd(y2, _mm256_addsub_pd( _mm256_mul_pd(h2_real, q2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
y2 = _mm256_add_ps(y2, _mm256_addsub_ps( _mm256_mul_ps(h2_real, q2), _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#endif
tmp3 = _mm256_mul_pd(h2_imag, q3);
// tmp3 = _mm256_mul_pd(h2_imag, q3);
#ifdef __ELPA_USE_FMA__
y3 = _mm256_add_pd(y3, _mm256_FMSUBADD_pd(h2_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// y3 = _mm256_add_pd(y3, _mm256_FMSUBADD_pd(h2_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#else
y3 = _mm256_add_pd(y3, _mm256_addsub_pd( _mm256_mul_pd(h2_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// y3 = _mm256_add_pd(y3, _mm256_addsub_pd( _mm256_mul_pd(h2_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#endif
tmp4 = _mm256_mul_pd(h2_imag, q4);
// tmp4 = _mm256_mul_pd(h2_imag, q4);
#ifdef __ELPA_USE_FMA__
y4 = _mm256_add_pd(y4, _mm256_FMSUBADD_pd(h2_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// y4 = _mm256_add_pd(y4, _mm256_FMSUBADD_pd(h2_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#else
y4 = _mm256_add_pd(y4, _mm256_addsub_pd( _mm256_mul_pd(h2_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// y4 = _mm256_add_pd(y4, _mm256_addsub_pd( _mm256_mul_pd(h2_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#endif
}
h1_real = _mm256_broadcast_sd(&hh_dbl[(nb-1)*2]);
h1_imag = _mm256_broadcast_sd(&hh_dbl[((nb-1)*2)+1]);
h1_real = _mm256_broadcast_ss(&hh_dbl[(nb-1)*2]);
h1_imag = _mm256_broadcast_ss(&hh_dbl[((nb-1)*2)+1]);
#ifndef __ELPA_USE_FMA__
// conjugate
h1_imag = _mm256_xor_pd(h1_imag, sign);
h1_imag = _mm256_xor_ps(h1_imag, sign);
#endif
q1 = _mm256_load_pd(&q_dbl[(2*nb*ldq)+0]);
q2 = _mm256_load_pd(&q_dbl[(2*nb*ldq)+4]);
q3 = _mm256_load_pd(&q_dbl[(2*nb*ldq)+8]);
q4 = _mm256_load_pd(&q_dbl[(2*nb*ldq)+12]);
q1 = _mm256_load_ps(&q_dbl[(2*nb*ldq)+0]);
q2 = _mm256_load_ps(&q_dbl[(2*nb*ldq)+8]);
// q3 = _mm256_load_pd(&q_dbl[(2*nb*ldq)+8]);
// q4 = _mm256_load_pd(&q_dbl[(2*nb*ldq)+12]);
tmp1 = _mm256_mul_pd(h1_imag, q1);
tmp1 = _mm256_mul_ps(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
x1 = _mm256_add_pd(x1, _mm256_FMSUBADD_pd(h1_real, q1, _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
x1 = _mm256_add_ps(x1, _mm256_FMSUBADD_ps(h1_real, q1, _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#else
x1 = _mm256_add_pd(x1, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q1), _mm256_shuffle_pd(tmp1, tmp1, 0x5)));
x1 = _mm256_add_ps(x1, _mm256_addsub_ps( _mm256_mul_ps(h1_real, q1), _mm256_shuffle_ps(tmp1, tmp1, 0xb1)));
#endif
tmp2 = _mm256_mul_pd(h1_imag, q2);
tmp2 = _mm256_mul_ps(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
x2 = _mm256_add_pd(x2, _mm256_FMSUBADD_pd(h1_real, q2, _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
x2 = _mm256_add_ps(x2, _mm256_FMSUBADD_ps(h1_real, q2, _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#else
x2 = _mm256_add_pd(x2, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q2), _mm256_shuffle_pd(tmp2, tmp2, 0x5)));
x2 = _mm256_add_ps(x2, _mm256_addsub_ps( _mm256_mul_ps(h1_real, q2), _mm256_shuffle_ps(tmp2, tmp2, 0xb1)));
#endif
tmp3 = _mm256_mul_pd(h1_imag, q3);
// tmp3 = _mm256_mul_pd(h1_imag, q3);
#ifdef __ELPA_USE_FMA__
x3 = _mm256_add_pd(x3, _mm256_FMSUBADD_pd(h1_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// x3 = _mm256_add_pd(x3, _mm256_FMSUBADD_pd(h1_real, q3, _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#else
x3 = _mm256_add_pd(x3, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0x5)));
// x3 = _mm256_add_pd(x3, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q3), _mm256_shuffle_pd(tmp3, tmp3, 0xb1)));
#endif
tmp4 = _mm256_mul_pd(h1_imag, q4);
// tmp4 = _mm256_mul_pd(h1_imag, q4);
#ifdef __ELPA_USE_FMA__
x4 = _mm256_add_pd(x4, _mm256_FMSUBADD_pd(h1_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// x4 = _mm256_add_pd(x4, _mm256_FMSUBADD_pd(h1_real, q4, _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#else
x4 = _mm256_add_pd(x4, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0x5)));
// x4 = _mm256_add_pd(x4, _mm256_addsub_pd( _mm256_mul_pd(h1_real, q4), _mm256_shuffle_pd(tmp4, tmp4, 0xb1)));
#endif
h1_real = _mm256_broadcast_sd(&hh_dbl[0]);
h1_imag = _mm256_broadcast_sd(&hh_dbl[1]);
h1_real = _mm256_xor_pd(h1_real, sign);
h1_imag = _mm256_xor_pd(h1_imag, sign);
h1_real = _mm256_broadcast_ss(&hh_dbl[0]);
h1_imag = _mm256_broadcast_ss(&hh_dbl[1]);
h1_real = _mm256_xor_ps(h1_real, sign);
h1_imag = _mm256_xor_ps(h1_imag, sign);
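// hh[0] is tau1; flipping the sign bit of both its real and imaginary parts gives
// h1 = -tau1, so the block below computes x *= -tau1 (cf. the scalar reference above).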
tmp1 = _mm256_mul_pd(h1_imag, x1);
tmp1 = _mm256_mul_ps(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
x1 = _mm256_FMADDSUB_pd(h1_real, x1, _mm256_shuffle_pd(tmp1, tmp1, 0x5));
x1 = _mm256_FMADDSUB_ps(h1_real, x1, _mm256_shuffle_ps(tmp1, tmp1, 0xb1));
#else
x1 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, x1), _mm256_shuffle_pd(tmp1, tmp1, 0x5));
x1 = _mm256_addsub_ps( _mm256_mul_ps(h1_real, x1), _mm256_shuffle_ps(tmp1, tmp1, 0xb1));
#endif
tmp2 = _mm256_mul_pd(h1_imag, x2);
tmp2 = _mm256_mul_ps(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
x2 = _mm256_FMADDSUB_pd(h1_real, x2, _mm256_shuffle_pd(tmp2, tmp2, 0x5));
x2 = _mm256_FMADDSUB_ps(h1_real, x2, _mm256_shuffle_ps(tmp2, tmp2, 0xb1));
#else
x2 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, x2), _mm256_shuffle_pd(tmp2, tmp2, 0x5));
x2 = _mm256_addsub_ps( _mm256_mul_ps(h1_real, x2), _mm256_shuffle_ps(tmp2, tmp2, 0xb1));
#endif
tmp3 = _mm256_mul_pd(h1_imag, x3);
// tmp3 = _mm256_mul_pd(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
x3 = _mm256_FMADDSUB_pd(h1_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0x5));
// x3 = _mm256_FMADDSUB_pd(h1_real, x3, _mm256_shuffle_pd(tmp3, tmp3, 0xb1));
#else
x3 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0x5));
// x3 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, x3), _mm256_shuffle_pd(tmp3, tmp3, 0xb1));
#endif
tmp4 = _mm256_mul_pd(h1_imag, x4);
// tmp4 = _mm256_mul_pd(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
x4 = _mm256_FMADDSUB_pd(h1_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0x5));
// x4 = _mm256_FMADDSUB_pd(h1_real, x4, _mm256_shuffle_pd(tmp4, tmp4, 0xb1));
#else
x4 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0x5));
// x4 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, x4), _mm256_shuffle_pd(tmp4, tmp4, 0xb1));
#endif
h1_real = _mm256_broadcast_sd(&hh_dbl[ldh*2]);
h1_imag = _mm256_broadcast_sd(&hh_dbl[(ldh*2)+1]);
h2_real = _mm256_broadcast_sd(&hh_dbl[ldh*2]);
h2_imag = _mm256_broadcast_sd(&hh_dbl[(ldh*2)+1]);
h1_real = _mm256_broadcast_ss(&hh_dbl[ldh*2]);
h1_imag = _mm256_broadcast_ss(&hh_dbl[(ldh*2)+1]);
h2_real = _mm256_broadcast_ss(&hh_dbl[ldh*2]);
h2_imag = _mm256_broadcast_ss(&hh_dbl[(ldh*2)+1]);
h1_real = _mm256_xor_pd(h1_real, sign);
h1_imag = _mm256_xor_pd(h1_imag, sign);
h2_real = _mm256_xor_pd(h2_real, sign);
h2_imag = _mm256_xor_pd(h2_imag, sign);
h1_real = _mm256_xor_ps(h1_real, sign);
h1_imag = _mm256_xor_ps(h1_imag, sign);
h2_real = _mm256_xor_ps(h2_real, sign);
h2_imag = _mm256_xor_ps(h2_imag, sign);
__m128d tmp_s_128 = _mm_loadu_pd(s_dbl);
tmp2 = _mm256_broadcast_pd(&tmp_s_128);
tmp1 = _mm256_mul_pd(h2_imag, tmp2);
// careful here: _mm_loadu_ps reads 4 floats (16 bytes), but only the first two (the one complex<float> s) are meaningful
// __m128d tmp_s_128 = _mm_loadu_pd(s_dbl); // load 1 complex value (2 doubles, i.e. 16 bytes)
__m128 tmp_s_128 = _mm_loadu_ps(s_dbl);
// tmp2 = _mm256_broadcast_pd(&tmp_s_128); // broadcast the 1 complex value, i.e. duplicate it
tmp2 = _mm256_broadcast_ps(&tmp_s_128); // duplicate the 128-bit lane into both halves of the 256-bit register
tmp1 = _mm256_mul_ps(h2_imag, tmp2); // multiply h2_imag with the complex value
#ifdef __ELPA_USE_FMA__
tmp2 = _mm256_FMADDSUB_pd(h2_real, tmp2, _mm256_shuffle_pd(tmp1, tmp1, 0x5));
tmp2 = _mm256_FMADDSUB_ps(h2_real, tmp2, _mm256_shuffle_ps(tmp1, tmp1, 0xb1));
#else
tmp2 = _mm256_addsub_pd( _mm256_mul_pd(h2_real, tmp2), _mm256_shuffle_pd(tmp1, tmp1, 0x5));
tmp2 = _mm256_addsub_ps( _mm256_mul_ps(h2_real, tmp2), _mm256_shuffle_ps(tmp1, tmp1, 0xb1));
#endif
_mm_storeu_pd(s_dbl, _mm256_castpd256_pd128(tmp2));
h2_real = _mm256_broadcast_sd(&s_dbl[0]);
h2_imag = _mm256_broadcast_sd(&s_dbl[1]);
// careful here: this stores the low 4 floats through s_dbl, which points at a single complex<float>; only s_dbl[0]/s_dbl[1] (the scaled s) are used below
// _mm_storeu_pd(s_dbl, _mm256_castpd256_pd128(tmp2));
_mm_storeu_ps(s_dbl, _mm256_castps256_ps128(tmp2));
h2_real = _mm256_broadcast_ss(&s_dbl[0]);
h2_imag = _mm256_broadcast_ss(&s_dbl[1]);
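// At this point h1 = -tau2 (hh[ldh]) and s_dbl holds the product (-tau2)*s, re-broadcast
// into h2_real/h2_imag; the following updates therefore evaluate y = h1*y + h2*x with
// h1 = -tau2 and h2 = -tau2*s, matching the scalar reference above.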
tmp1 = _mm256_mul_pd(h1_imag, y1);
tmp1 = _mm256_mul_ps(h1_imag, y1);
#ifdef __ELPA_USE_FMA__
y1 = _mm256_FMADDSUB_pd(h1_real, y1, _mm256_shuffle_pd(tmp1, tmp1, 0x5));
y1 = _mm256_FMADDSUB_ps(h1_real, y1, _mm256_shuffle_ps(tmp1, tmp1, 0xb1));
#else
y1 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, y1), _mm256_shuffle_pd(tmp1, tmp1, 0x5));
y1 = _mm256_addsub_ps( _mm256_mul_ps(h1_real, y1), _mm256_shuffle_ps(tmp1, tmp1, 0xb1));
#endif
tmp2 = _mm256_mul_pd(h1_imag, y2);
tmp2 = _mm256_mul_ps(h1_imag, y2);
#ifdef __ELPA_USE_FMA__
y2 = _mm256_FMADDSUB_pd(h1_real, y2, _mm256_shuffle_pd(tmp2, tmp2, 0x5));
y2 = _mm256_FMADDSUB_ps(h1_real, y2, _mm256_shuffle_ps(tmp2, tmp2, 0xb1));
#else
y2 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, y2), _mm256_shuffle_pd(tmp2, tmp2, 0x5));
y2 = _mm256_addsub_ps( _mm256_mul_ps(h1_real, y2), _mm256_shuffle_ps(tmp2, tmp2, 0xb1));
#endif
tmp3 = _mm256_mul_pd(h1_imag, y3);
// tmp3 = _mm256_mul_pd(h1_imag, y3);
#ifdef __ELPA_USE_FMA__
y3 = _mm256_FMADDSUB_pd(h1_real, y3, _mm256_shuffle_pd(tmp3, tmp3, 0x5));
// y3 = _mm256_FMADDSUB_pd(h1_real, y3, _mm256_shuffle_pd(tmp3, tmp3, 0xb1));
#else
y3 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, y3), _mm256_shuffle_pd(tmp3, tmp3, 0x5));
// y3 = _mm256_addsub_pd( _mm256_mul_pd(h1_real, y3), _mm256_shuffle_pd(tmp3, tmp3, 0xb1));
#endif
tmp4 = _mm256_mul_pd(h1_imag, y4);
// tmp4 = _mm256_mul_pd(h1_imag, y4);
#ifdef __ELPA_USE_FMA__
y4 = _mm256_FMADDSUB_pd(h1_real, y4, _mm256_shuffle_pd(tmp4, tmp4, 0x5));