Commit be4b61ae authored by Andreas Marek

Double precision AVX512 block6 kernel

parent 366d0f43
@@ -61,32 +61,28 @@
// --------------------------------------------------------------------------------------------------
#include "config-f90.h"
#include <x86intrin.h>
#define __forceinline __attribute__((always_inline)) static
#ifdef HAVE_AVX2
#ifdef HAVE_AVX512
#ifdef __FMA4__
#define __ELPA_USE_FMA__
#define _mm256_FMA_pd(a,b,c) _mm256_macc_pd(a,b,c)
#define _mm256_NFMA_pd(a,b,c) _mm256_nmacc_pd(a,b,c)
#define _mm256_FMSUB_pd(a,b,c) _mm256_msub(a,b,c)
#endif
#define _mm512_FMA_pd(a,b,c) _mm512_fmadd_pd(a,b,c)
#define _mm512_NFMA_pd(a,b,c) _mm512_fnmadd_pd(a,b,c)
#define _mm512_FMSUB_pd(a,b,c) _mm512_fmsub_pd(a,b,c)
#ifdef __AVX2__
#define __ELPA_USE_FMA__
#define _mm256_FMA_pd(a,b,c) _mm256_fmadd_pd(a,b,c)
#define _mm256_NFMA_pd(a,b,c) _mm256_fnmadd_pd(a,b,c)
#define _mm256_FMSUB_pd(a,b,c) _mm256_fmsub_pd(a,b,c)
#endif
#endif
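// Illustration (added for clarity, not part of this commit): the wrappers above
// only unify the FMA spelling; per 64-bit lane, _mm512_FMA_pd(a,b,c) computes
// a*b + c, _mm512_NFMA_pd(a,b,c) computes -(a*b) + c, and _mm512_FMSUB_pd(a,b,c)
// computes a*b - c. A minimal, hypothetical usage sketch (assumes the
// <x86intrin.h> include above and an AVX-512 target) is kept under #if 0 so it
// does not affect the build:
#if 0
static inline void fma_wrapper_sketch(double *out, const double *a, const double *b, const double *c)
{
	__m512d va = _mm512_loadu_pd(a);   // 8 doubles from a
	__m512d vb = _mm512_loadu_pd(b);   // 8 doubles from b
	__m512d vc = _mm512_loadu_pd(c);   // 8 doubles from c
	_mm512_storeu_pd(out, _mm512_FMA_pd(va, vb, vc));   // out[k] = a[k]*b[k] + c[k]
}
#endif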
//Forward declaration
static void hh_trafo_kernel_4_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods);
//static void hh_trafo_kernel_4_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods);
static void hh_trafo_kernel_8_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods);
static void hh_trafo_kernel_16_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods);
static void hh_trafo_kernel_24_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods);
static void hh_trafo_kernel_32_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods);
void hexa_hh_trafo_real_avx512_6hv_double(double* q, double* hh, int* pnb, int* pnq, int* pldq, int* pldh);
/*
@@ -115,22 +111,6 @@ void hexa_hh_trafo_real_avx512_6hv_double(double* q, double* hh, int* pnb, int*
// 6 householder vectors simultaneously
double scalarprods[15];
// scalarprods[0] = s_1_2;
// scalarprods[1] = s_1_3;
// scalarprods[2] = s_2_3;
// scalarprods[3] = s_1_4;
// scalarprods[4] = s_2_4;
// scalarprods[5] = s_3_4;
// scalarprods[6] = s_1_5;
// scalarprods[7] = s_2_5;
// scalarprods[8] = s_3_5;
// scalarprods[9] = s_4_5;
// scalarprods[10] = s_1_6;
// scalarprods[11] = s_2_6;
// scalarprods[12] = s_3_6;
// scalarprods[13] = s_4_6;
// scalarprods[14] = s_5_6;
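// Note added for clarity (not in the original source): the 15 entries above are
// the pairwise scalar products s_i_j (1 <= i < j <= 6) of the six Householder
// vectors, stored in triangular order s_1_2, s_1_3, s_2_3, ..., s_5_6. Under
// that layout the flat index works out to
//     idx(i,j) = (j-1)*(j-2)/2 + (i-1)
// e.g. idx(1,2) = 0, idx(3,4) = 5, idx(5,6) = 14.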
scalarprods[0] = hh[(ldh+1)];
scalarprods[1] = hh[(ldh*2)+2];
scalarprods[2] = hh[(ldh*2)+1];
@@ -226,987 +206,2647 @@ void hexa_hh_trafo_real_avx512_6hv_double(double* q, double* hh, int* pnb, int*
scalarprods[10] += hh[i-5] * hh[i+(ldh*5)];
}
// printf("s_1_2: %f\n", scalarprods[0]);
// printf("s_1_3: %f\n", scalarprods[1]);
// printf("s_2_3: %f\n", scalarprods[2]);
// printf("s_1_4: %f\n", scalarprods[3]);
// printf("s_2_4: %f\n", scalarprods[4]);
// printf("s_3_4: %f\n", scalarprods[5]);
// printf("s_1_5: %f\n", scalarprods[6]);
// printf("s_2_5: %f\n", scalarprods[7]);
// printf("s_3_5: %f\n", scalarprods[8]);
// printf("s_4_5: %f\n", scalarprods[9]);
// printf("s_1_6: %f\n", scalarprods[10]);
// printf("s_2_6: %f\n", scalarprods[11]);
// printf("s_3_6: %f\n", scalarprods[12]);
// printf("s_4_6: %f\n", scalarprods[13]);
// printf("s_5_6: %f\n", scalarprods[14]);
// Production level kernel calls with padding
#ifdef __AVX__
for (i = 0; i < nq-4; i+=8)
for (i = 0; i < nq-24; i+=32)
{
hh_trafo_kernel_8_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
hh_trafo_kernel_32_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
}
if (nq == i)
{
return;
}
else
if (nq-i == 24)
{
hh_trafo_kernel_4_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
hh_trafo_kernel_24_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
}
#else
for (i = 0; i < nq-2; i+=4)
{
hh_trafo_kernel_4_SSE_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
}
if (nq == i)
if (nq-i == 16)
{
return;
hh_trafo_kernel_16_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
}
else
else
{
hh_trafo_kernel_2_SSE_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
hh_trafo_kernel_8_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
}
#endif
}
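// Sketch of the dispatch above (added for illustration, not part of the commit;
// the helper name is hypothetical and nq is assumed to be a multiple of 8):
// columns of q are processed 32 at a time with the widest kernel, and the single
// remainder block of 24, 16 or 8 columns goes to the matching narrower kernel.
#if 0
static void avx512_6hv_dispatch_sketch(double* q, double* hh, int nb, int nq, int ldq, int ldh, double* scalarprods)
{
	int i;
	for (i = 0; i < nq-24; i+=32)
	{
		hh_trafo_kernel_32_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
	}
	if (nq == i)
	{
		return;
	}
	if (nq-i == 24)
	{
		hh_trafo_kernel_24_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
	}
	else if (nq-i == 16)
	{
		hh_trafo_kernel_16_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
	}
	else
	{
		hh_trafo_kernel_8_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
	}
}
#endif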
#if 0
void hexa_hh_trafo_fast_(double* q, double* hh, int* pnb, int* pnq, int* pldq, int* pldh)
/**
* Unrolled kernel that computes
* 8 rows of Q simultaneously, a
* matrix vector product with six householder
* vectors + a rank 1 update is performed
*/
__forceinline void hh_trafo_kernel_8_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods)
{
/////////////////////////////////////////////////////
// Matrix Vector Multiplication, Q [8 x nb+5] * hh
// hh contains six householder vectors
/////////////////////////////////////////////////////
int i;
int nb = *pnb;
int nq = *pldq;
int ldq = *pldq;
int ldh = *pldh;
// calculating scalar products to compute
// 6 householder vectors simultaneously
double scalarprods[15];
__m512d a1_1 = _mm512_load_pd(&q[ldq*5]);
__m512d a2_1 = _mm512_load_pd(&q[ldq*4]);
__m512d a3_1 = _mm512_load_pd(&q[ldq*3]);
__m512d a4_1 = _mm512_load_pd(&q[ldq*2]);
__m512d a5_1 = _mm512_load_pd(&q[ldq]);
__m512d a6_1 = _mm512_load_pd(&q[0]);
// scalarprods[0] = s_1_2;
// scalarprods[1] = s_1_3;
// scalarprods[2] = s_2_3;
// scalarprods[3] = s_1_4;
// scalarprods[4] = s_2_4;
// scalarprods[5] = s_3_4;
// scalarprods[6] = s_1_5;
// scalarprods[7] = s_2_5;
// scalarprods[8] = s_3_5;
// scalarprods[9] = s_4_5;
// scalarprods[10] = s_1_6;
// scalarprods[11] = s_2_6;
// scalarprods[12] = s_3_6;
// scalarprods[13] = s_4_6;
// scalarprods[14] = s_5_6;
__m512d h_6_5 = _mm512_set1_pd(hh[(ldh*5)+1]);
__m512d h_6_4 = _mm512_set1_pd(hh[(ldh*5)+2]);
__m512d h_6_3 = _mm512_set1_pd(hh[(ldh*5)+3]);
__m512d h_6_2 = _mm512_set1_pd(hh[(ldh*5)+4]);
__m512d h_6_1 = _mm512_set1_pd(hh[(ldh*5)+5]);
scalarprods[0] = hh[(ldh+1)];
scalarprods[1] = hh[(ldh*2)+2];
scalarprods[2] = hh[(ldh*2)+1];
scalarprods[3] = hh[(ldh*3)+3];
scalarprods[4] = hh[(ldh*3)+2];
scalarprods[5] = hh[(ldh*3)+1];
scalarprods[6] = hh[(ldh*4)+4];
scalarprods[7] = hh[(ldh*4)+3];
scalarprods[8] = hh[(ldh*4)+2];
scalarprods[9] = hh[(ldh*4)+1];
scalarprods[10] = hh[(ldh*5)+5];
scalarprods[11] = hh[(ldh*5)+4];
scalarprods[12] = hh[(ldh*5)+3];
scalarprods[13] = hh[(ldh*5)+2];
scalarprods[14] = hh[(ldh*5)+1];
// register __m512d t1 = _mm512_FMA_pd(a5_1, h_6_5, a6_1);
__m512d t1 = _mm512_FMA_pd(a5_1, h_6_5, a6_1);
// calculate scalar product of first and fourth householder vector
// loop counter = 2
scalarprods[0] += hh[1] * hh[(2+ldh)];
scalarprods[2] += hh[(ldh)+1] * hh[2+(ldh*2)];
scalarprods[5] += hh[(ldh*2)+1] * hh[2+(ldh*3)];
scalarprods[9] += hh[(ldh*3)+1] * hh[2+(ldh*4)];
scalarprods[14] += hh[(ldh*4)+1] * hh[2+(ldh*5)];
t1 = _mm512_FMA_pd(a4_1, h_6_4, t1);
t1 = _mm512_FMA_pd(a3_1, h_6_3, t1);
t1 = _mm512_FMA_pd(a2_1, h_6_2, t1);
t1 = _mm512_FMA_pd(a1_1, h_6_1, t1);
// loop counter = 3
scalarprods[0] += hh[2] * hh[(3+ldh)];
scalarprods[2] += hh[(ldh)+2] * hh[3+(ldh*2)];
scalarprods[5] += hh[(ldh*2)+2] * hh[3+(ldh*3)];
scalarprods[9] += hh[(ldh*3)+2] * hh[3+(ldh*4)];
scalarprods[14] += hh[(ldh*4)+2] * hh[3+(ldh*5)];
__m512d h_5_4 = _mm512_set1_pd(hh[(ldh*4)+1]);
__m512d h_5_3 = _mm512_set1_pd(hh[(ldh*4)+2]);
__m512d h_5_2 = _mm512_set1_pd(hh[(ldh*4)+3]);
__m512d h_5_1 = _mm512_set1_pd(hh[(ldh*4)+4]);
scalarprods[1] += hh[1] * hh[3+(ldh*2)];
scalarprods[4] += hh[(ldh*1)+1] * hh[3+(ldh*3)];
scalarprods[8] += hh[(ldh*2)+1] * hh[3+(ldh*4)];
scalarprods[13] += hh[(ldh*3)+1] * hh[3+(ldh*5)];
// register __m512d v1 = _mm512_FMA_pd(a4_1, h_5_4, a5_1);
__m512d v1 = _mm512_FMA_pd(a4_1, h_5_4, a5_1);
// loop counter = 4
scalarprods[0] += hh[3] * hh[(4+ldh)];
scalarprods[2] += hh[(ldh)+3] * hh[4+(ldh*2)];
scalarprods[5] += hh[(ldh*2)+3] * hh[4+(ldh*3)];
scalarprods[9] += hh[(ldh*3)+3] * hh[4+(ldh*4)];
scalarprods[14] += hh[(ldh*4)+3] * hh[4+(ldh*5)];
v1 = _mm512_FMA_pd(a3_1, h_5_3, v1);
v1 = _mm512_FMA_pd(a2_1, h_5_2, v1);
v1 = _mm512_FMA_pd(a1_1, h_5_1, v1);
scalarprods[1] += hh[2] * hh[4+(ldh*2)];
scalarprods[4] += hh[(ldh*1)+2] * hh[4+(ldh*3)];
scalarprods[8] += hh[(ldh*2)+2] * hh[4+(ldh*4)];
scalarprods[13] += hh[(ldh*3)+2] * hh[4+(ldh*5)];
__m512d h_4_3 = _mm512_set1_pd(hh[(ldh*3)+1]);
__m512d h_4_2 = _mm512_set1_pd(hh[(ldh*3)+2]);
__m512d h_4_1 = _mm512_set1_pd(hh[(ldh*3)+3]);
scalarprods[3] += hh[1] * hh[4+(ldh*3)];
scalarprods[7] += hh[(ldh)+1] * hh[4+(ldh*4)];
scalarprods[12] += hh[(ldh*2)+1] * hh[4+(ldh*5)];
// register __m512d w1 = _mm512_FMA_pd(a3_1, h_4_3, a4_1);
__m512d w1 = _mm512_FMA_pd(a3_1, h_4_3, a4_1);
// loop counter = 5
scalarprods[0] += hh[4] * hh[(5+ldh)];
scalarprods[2] += hh[(ldh)+4] * hh[5+(ldh*2)];
scalarprods[5] += hh[(ldh*2)+4] * hh[5+(ldh*3)];
scalarprods[9] += hh[(ldh*3)+4] * hh[5+(ldh*4)];
scalarprods[14] += hh[(ldh*4)+4] * hh[5+(ldh*5)];
w1 = _mm512_FMA_pd(a2_1, h_4_2, w1);
w1 = _mm512_FMA_pd(a1_1, h_4_1, w1);
scalarprods[1] += hh[3] * hh[5+(ldh*2)];
scalarprods[4] += hh[(ldh*1)+3] * hh[5+(ldh*3)];
scalarprods[8] += hh[(ldh*2)+3] * hh[5+(ldh*4)];
scalarprods[13] += hh[(ldh*3)+3] * hh[5+(ldh*5)];
__m512d h_2_1 = _mm512_set1_pd(hh[ldh+1]);
__m512d h_3_2 = _mm512_set1_pd(hh[(ldh*2)+1]);
__m512d h_3_1 = _mm512_set1_pd(hh[(ldh*2)+2]);
scalarprods[3] += hh[2] * hh[5+(ldh*3)];
scalarprods[7] += hh[(ldh)+2] * hh[5+(ldh*4)];
scalarprods[12] += hh[(ldh*2)+2] * hh[5+(ldh*5)];
// register __m512d z1 = _mm512_FMA_pd(a2_1, h_3_2, a3_1);
__m512d z1 = _mm512_FMA_pd(a2_1, h_3_2, a3_1);
scalarprods[6] += hh[1] * hh[5+(ldh*4)];
scalarprods[11] += hh[(ldh)+1] * hh[5+(ldh*5)];
z1 = _mm512_FMA_pd(a1_1, h_3_1, z1);
// register __m512d y1 = _mm512_FMA_pd(a1_1, h_2_1, a2_1);
__m512d y1 = _mm512_FMA_pd(a1_1, h_2_1, a2_1);
#pragma ivdep
for (i = 6; i < nb; i++)
// register __m512d x1 = a1_1;
__m512d x1 = a1_1;
__m512d q1;
__m512d h1;
__m512d h2;
__m512d h3;
__m512d h4;
__m512d h5;
__m512d h6;
for(i = 6; i < nb; i++)
{
scalarprods[0] += hh[i-1] * hh[(i+ldh)];
scalarprods[2] += hh[(ldh)+i-1] * hh[i+(ldh*2)];
scalarprods[5] += hh[(ldh*2)+i-1] * hh[i+(ldh*3)];
scalarprods[9] += hh[(ldh*3)+i-1] * hh[i+(ldh*4)];
scalarprods[14] += hh[(ldh*4)+i-1] * hh[i+(ldh*5)];
h1 = _mm512_set1_pd(hh[i-5]);
q1 = _mm512_load_pd(&q[i*ldq]);
scalarprods[1] += hh[i-2] * hh[i+(ldh*2)];
scalarprods[4] += hh[(ldh*1)+i-2] * hh[i+(ldh*3)];
scalarprods[8] += hh[(ldh*2)+i-2] * hh[i+(ldh*4)];
scalarprods[13] += hh[(ldh*3)+i-2] * hh[i+(ldh*5)];
x1 = _mm512_FMA_pd(q1, h1, x1);
scalarprods[3] += hh[i-3] * hh[i+(ldh*3)];
scalarprods[7] += hh[(ldh)+i-3] * hh[i+(ldh*4)];
scalarprods[12] += hh[(ldh*2)+i-3] * hh[i+(ldh*5)];
h2 = _mm512_set1_pd(hh[ldh+i-4]);
scalarprods[6] += hh[i-4] * hh[i+(ldh*4)];
scalarprods[11] += hh[(ldh)+i-4] * hh[i+(ldh*5)];
y1 = _mm512_FMA_pd(q1, h2, y1);
h3 = _mm512_set1_pd(hh[(ldh*2)+i-3]);
scalarprods[10] += hh[i-5] * hh[i+(ldh*5)];
}
z1 = _mm512_FMA_pd(q1, h3, z1);
// printf("s_1_2: %f\n", scalarprods[0]);
// printf("s_1_3: %f\n", scalarprods[1]);
// printf("s_2_3: %f\n", scalarprods[2]);
// printf("s_1_4: %f\n", scalarprods[3]);
// printf("s_2_4: %f\n", scalarprods[4]);
// printf("s_3_4: %f\n", scalarprods[5]);
// printf("s_1_5: %f\n", scalarprods[6]);
// printf("s_2_5: %f\n", scalarprods[7]);
// printf("s_3_5: %f\n", scalarprods[8]);
// printf("s_4_5: %f\n", scalarprods[9]);
// printf("s_1_6: %f\n", scalarprods[10]);
// printf("s_2_6: %f\n", scalarprods[11]);
// printf("s_3_6: %f\n", scalarprods[12]);
// printf("s_4_6: %f\n", scalarprods[13]);
// printf("s_5_6: %f\n", scalarprods[14]);
h4 = _mm512_set1_pd(hh[(ldh*3)+i-2]);
// Production level kernel calls with padding
#ifdef __AVX__
for (i = 0; i < nq; i+=8)
{
hh_trafo_kernel_8_AVX512_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
}
#else
for (i = 0; i < nq; i+=4)
{
hh_trafo_kernel_4_SSE_6hv_double(&q[i], hh, nb, ldq, ldh, scalarprods);
w1 = _mm512_FMA_pd(q1, h4, w1);
h5 = _mm512_set1_pd(hh[(ldh*4)+i-1]);
v1 = _mm512_FMA_pd(q1, h5, v1);
h6 = _mm512_set1_pd(hh[(ldh*5)+i]);
t1 = _mm512_FMA_pd(q1, h6, t1);
}
#endif
}
#endif
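// Reader's note (added, not in the original source): in the kernels below the
// accumulators x1, y1, z1, w1, v1 and t1 collect the dot products of the loaded
// q columns with the 1st..6th Householder vector (hh column 0..5); the
// precomputed scalarprods[] entries are then used to combine these projections
// so that one pass over q applies all six reflections.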
/**
* Unrolled kernel that computes
* 8 rows of Q simultaneously, a
* matrix vector product with six householder
* vectors + a rank 1 update is performed
*/
__forceinline void hh_trafo_kernel_8_AVX512_6hv_double(double* q, double* hh, int nb, int ldq, int ldh, double* scalarprods)
{
/////////////////////////////////////////////////////
// Matrix Vector Multiplication, Q [8 x nb+5] * hh
// hh contains six householder vectors
/////////////////////////////////////////////////////
int i;
h1 = _mm512_set1_pd(hh[nb-5]);
q1 = _mm512_load_pd(&q[nb*ldq]);
__m256d a1_1 = _mm256_load_pd(&q[ldq*5]);
__m256d a2_1 = _mm256_load_pd(&q[ldq*4]);
__m256d a3_1 = _mm256_load_pd(&q[ldq*3]);
__m256d a4_1 = _mm256_load_pd(&q[ldq*2]);
__m256d a5_1 = _mm256_load_pd(&q[ldq]);
__m256d a6_1 = _mm256_load_pd(&q[0]);
x1 = _mm512_FMA_pd(q1, h1, x1);
__m256d h_6_5 = _mm256_broadcast_sd(&hh[(ldh*5)+1]);
__m256d h_6_4 = _mm256_broadcast_sd(&hh[(ldh*5)+2]);
__m256d h_6_3 = _mm256_broadcast_sd(&hh[(ldh*5)+3]);
__m256d h_6_2 = _mm256_broadcast_sd(&hh[(ldh*5)+4]);
__m256d h_6_1 = _mm256_broadcast_sd(&hh[(ldh*5)+5]);
#ifdef __ELPA_USE_FMA__
register __m256d t1 = _mm256_FMA_pd(a5_1, h_6_5, a6_1);
t1 = _mm256_FMA_pd(a4_1, h_6_4, t1);
t1 = _mm256_FMA_pd(a3_1, h_6_3, t1);
t1 = _mm256_FMA_pd(a2_1, h_6_2, t1);
t1 = _mm256_FMA_pd(a1_1, h_6_1, t1);
#else
register __m256d t1 = _mm256_add_pd(a6_1, _mm256_mul_pd(a5_1, h_6_5));
t1 = _mm256_add_pd(t1, _mm256_mul_pd(a4_1, h_6_4));
t1 = _mm256_add_pd(t1, _mm256_mul_pd(a3_1, h_6_3));
t1 = _mm256_add_pd(t1, _mm256_mul_pd(a2_1, h_6_2));
t1 = _mm256_add_pd(t1, _mm256_mul_pd(a1_1, h_6_1));
#endif
__m256d h_5_4 = _mm256_broadcast_sd(&hh[(ldh*4)+1]);
__m256d h_5_3 = _mm256_broadcast_sd(&hh[(ldh*4)+2]);
__m256d h_5_2 = _mm256_broadcast_sd(&hh[(ldh*4)+3]);
__m256d h_5_1 = _mm256_broadcast_sd(&hh[(ldh*4)+4]);
#ifdef __ELPA_USE_FMA__
register __m256d v1 = _mm256_FMA_pd(a4_1, h_5_4, a5_1);
v1 = _mm256_FMA_pd(a3_1, h_5_3, v1);
v1 = _mm256_FMA_pd(a2_1, h_5_2, v1);
v1 = _mm256_FMA_pd(a1_1, h_5_1, v1);
#else
register __m256d v1 = _mm256_add_pd(a5_1, _mm256_mul_pd(a4_1, h_5_4));
v1 = _mm256_add_pd(v1, _mm256_mul_pd(a3_1, h_5_3));
v1 = _mm256_add_pd(v1, _mm256_mul_pd(a2_1, h_5_2));
v1 = _mm256_add_pd(v1, _mm256_mul_pd(a1_1, h_5_1));
#endif
__m256d h_4_3 = _mm256_broadcast_sd(&hh[(ldh*3)+1]);
__m256d h_4_2 = _mm256_broadcast_sd(&hh[(ldh*3)+2]);
__m256d h_4_1 = _mm256_broadcast_sd(&hh[(ldh*3)+3]);
#ifdef __ELPA_USE_FMA__
register __m256d w1 = _mm256_FMA_pd(a3_1, h_4_3, a4_1);
w1 = _mm256_FMA_pd(a2_1, h_4_2, w1);
w1 = _mm256_FMA_pd(a1_1, h_4_1, w1);
#else
register __m256d w1 = _mm256_add_pd(a4_1, _mm256_mul_pd(a3_1, h_4_3));
w1 = _mm256_add_pd(w1, _mm256_mul_pd(a2_1, h_4_2));
w1 = _mm256_add_pd(w1, _mm256_mul_pd(a1_1, h_4_1));
#endif
__m256d h_2_1 = _mm256_broadcast_sd(&hh[ldh+1]);
__m256d h_3_2 = _mm256_broadcast_sd(&hh[(ldh*2)+1]);
__m256d h_3_1 = _mm256_broadcast_sd(&hh[(ldh*2)+2]);
#ifdef __ELPA_USE_FMA__
register __m256d z1 = _mm256_FMA_pd(a2_1, h_3_2, a3_1);
z1 = _mm256_FMA_pd(a1_1, h_3_1, z1);
register __m256d y1 = _mm256_FMA_pd(a1_1, h_2_1, a2_1);
#else
register __m256d z1 = _mm256_add_pd(a3_1, _mm256_mul_pd(a2_1, h_3_2));
z1 = _mm256_add_pd(z1, _mm256_mul_pd(a1_1, h_3_1));
register __m256d y1 = _mm256_add_pd(a2_1, _mm256_mul_pd(a1_1, h_2_1));
#endif
register __m256d x1 = a1_1;
h2 = _mm512_set1_pd(hh[ldh+nb-4]);
y1 = _mm512_FMA_pd(q1, h2, y1);
__m256d a1_2 = _mm256_load_pd(&q[(ldq*5)+4]);
__m256d a2_2 = _mm256_load_pd(&q[(ldq*4)+4]);
__m256d a3_2 = _mm256_load_pd(&q[(ldq*3)+4]);
__m256d a4_2 = _mm256_load_pd(&q[(ldq*2)+4]);
__m256d a5_2 = _mm256_load_pd(&q[(ldq)+4]);
__m256d a6_2 = _mm256_load_pd(&q[4]);
h3 = _mm512_set1_pd(hh[(ldh*2)+nb-3]);
#ifdef __ELPA_USE_FMA__
register __m256d t2 = _mm256_FMA_pd(a5_2, h_6_5, a6_2);
t2 = _mm256_FMA_pd(a4_2, h_6_4, t2);
t2 = _mm256_FMA_pd(a3_2, h_6_3, t2);
t2 = _mm256_FMA_pd(a2_2, h_6_2, t2);
t2 = _mm256_FMA_pd(a1_2, h_6_1, t2);
register __m256d v2 = _mm256_FMA_pd(a4_2, h_5_4, a5_2);
v2 = _mm256_FMA_pd(a3_2, h_5_3, v2);
v2 = _mm256_FMA_pd(a2_2, h_5_2, v2);
v2 = _mm256_FMA_pd(a1_2, h_5_1, v2);
register __m256d w2 = _mm256_FMA_pd(a3_2, h_4_3, a4_2);
w2 = _mm256_FMA_pd(a2_2, h_4_2, w2);
w2 = _mm256_FMA_pd(a1_2, h_4_1, w2);
register __m256d z2 = _mm256_FMA_pd(a2_2, h_3_2, a3_2);
z2 = _mm256_FMA_pd(a1_2, h_3_1, z2);
register __m256d y2 = _mm256_FMA_pd(a1_2, h_2_1, a2_2);
#else
register __m256d t2 = _mm256_add_pd(a6_2, _mm256_mul_pd(a5_2, h_6_5));
t2 = _mm256_add_pd(t2, _mm256_mul_pd(a4_2, h_6_4));
t2 = _mm256_add_pd(t2, _mm256_mul_pd(a3_2, h_6_3));
t2 = _mm256_add_pd(t2, _mm256_mul_pd(a2_2, h_6_2));
t2 = _mm256_add_pd(t2, _mm256_mul_pd(a1_2, h_6_1));
register __m256d v2 = _mm256_add_pd(a5_2, _mm256_mul_pd(a4_2, h_5_4));
v2 = _mm256_add_pd(v2, _mm256_mul_pd(a3_2, h_5_3));
v2 = _mm256_add_pd(v2, _mm256_mul_pd(a2_2, h_5_2));
v2 = _mm256_add_pd(v2, _mm256_mul_pd(a1_2, h_5_1));
register __m256d w2 = _mm256_add_pd(a4_2, _mm256_mul_pd(a3_2, h_4_3));
w2 = _mm256_add_pd(w2, _mm256_mul_pd(a2_2, h_4_2));
w2 = _mm256_add_pd(w2, _mm256_mul_pd(a1_2, h_4_1));
register __m256d z2 = _mm256_add_pd(a3_2, _mm256_mul_pd(a2_2, h_3_2));
z2 = _mm256_add_pd(z2, _mm256_mul_pd(a1_2, h_3_1));
register __m256d y2 = _mm256_add_pd(a2_2, _mm256_mul_pd(a1_2, h_2_1));
#endif
register __m256d x2 = a1_2;
z1 = _mm512_FMA_pd(q1, h3, z1);
__m256d q1;
__m256d q2;
h4 = _mm512_set1_pd(hh[(ldh*3)+nb-2]);
__m256d h1;
__m256d h2;
__m256d h3;
__m256d h4;
__m256d h5;
__m256d h6;
w1 = _mm512_FMA_pd(q1, h4, w1);
for(i = 6; i < nb; i++)
{
h1 = _mm256_broadcast_sd(&hh[i-5]);
q1 = _mm256_load_pd(&q[i*ldq]);
q2 = _mm256_load_pd(&q[(i*ldq)+4]);
#ifdef __ELPA_USE_FMA__
x1 = _mm256_FMA_pd(q1, h1, x1);
x2 = _mm256_FMA_pd(q2, h1, x2);
#else
x1 = _mm256_add_pd(x1, _mm256_mul_pd(q1,h1));
x2 = _mm256_add_pd(x2, _mm256_mul_pd(q2,h1));
#endif
h2 = _mm256_broadcast_sd(&hh[ldh+i-4]);
#ifdef __ELPA_USE_FMA__
y1 = _mm256_FMA_pd(q1, h2, y1);
y2 = _mm256_FMA_pd(q2, h2, y2);
#else
y1 = _mm256_add_pd(y1, _mm256_mul_pd(q1,h2));
y2 = _mm256_add_pd(y2, _mm256_mul_pd(q2,h2));
#endif
h3 = _mm256_broadcast_sd(&hh[(ldh*2)+i-3]);
#ifdef __ELPA_USE_FMA__
z1 = _mm256_FMA_pd(q1, h3, z1);
z2 = _mm256_FMA_pd(q2, h3, z2);
#else
z1 = _mm256_add_pd(z1, _mm256_mul_pd(q1,h3));
z2 = _mm256_add_pd(z2, _mm256_mul_pd(q2,h3));
#endif
h4 = _mm256_broadcast_sd(&hh[(ldh*3)+i-2]);
#ifdef __ELPA_USE_FMA__
w1 = _mm256_FMA_pd(q1, h4, w1);
w2 = _mm256_FMA_pd(q2, h4, w2);
#else
w1 = _mm256_add_pd(w1, _mm256_mul_pd(q1,h4));
w2 = _mm256_add_pd(w2, _mm256_mul_pd(q2,h4));
#endif
h5 = _mm256_broadcast_sd(&hh[(ldh*4)+i-1]);
#ifdef __ELPA_USE_FMA__
v1 = _mm256_FMA_pd(q1, h5, v1);
v2 = _mm256_FMA_pd(q2, h5, v2);
#else
v1 = _mm256_add_pd(v1, _mm256_mul_pd(q1,h5));
v2 = _mm256_add_pd(v2, _mm256_mul_pd(q2,h5));
#endif
h6 = _mm256_broadcast_sd(&hh[(ldh*5)+i]);
#ifdef __ELPA_USE_FMA__
t1 = _mm256_FMA_pd(q1, h6, t1);
t2 = _mm256_FMA_pd(q2, h6, t2);
#else
t1 = _mm256_add_pd(t1, _mm256_mul_pd(q1,h6));
t2 = _mm256_add_pd(t2, _mm256_mul_pd(q2,h6));
#endif
}
h5 = _mm512_set1_pd(hh[(ldh*4)+nb-1]);
h1 = _mm256_broadcast_sd(&hh[nb-5]);
q1 = _mm256_load_pd(&q[nb*ldq]);
q2 = _mm256_load_pd(&q[(nb*ldq)+4]);
#ifdef __ELPA_USE_FMA__
x1 = _mm256_FMA_pd(q1, h1, x1);
x2 = _mm256_FMA_pd(q2, h1, x2);
#else
x1 = _mm256_add_pd(x1, _mm256_mul_pd(q1,h1));
x2 = _mm256_add_pd(x2, _mm256_mul_pd(q2,h1));
#endif
h2 = _mm256_broadcast_sd(&hh[ldh+nb-4]);
#ifdef __ELPA_USE_FMA__
y1 = _mm256_FMA_pd(q1, h2, y1);
y2 = _mm256_FMA_pd(q2, h2, y2);
#else
y1 = _mm256_add_pd(y1, _mm256_mul_pd(q1,h2));
y2 = _mm256_add_pd(y2, _mm256_mul_pd(q2,h2));
#endif
h3 = _mm256_broadcast_sd(&hh[(ldh*2)+nb-3]);
#ifdef __ELPA_USE_FMA__