//    This file is part of ELPA.
//
//    The ELPA library was originally created by the ELPA consortium,
//    consisting of the following organizations:
//
//    - Max Planck Computing and Data Facility (MPCDF), formerly known as
//      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
//    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
//      Informatik,
//    - Technische Universität München, Lehrstuhl für Informatik mit
//      Schwerpunkt Wissenschaftliches Rechnen,
//    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
//    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
//      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
//      and
//    - IBM Deutschland GmbH
//
//
//    This particular source code file contains additions, changes and
//    enhancements authored by Intel Corporation which is not part of
//    the ELPA consortium.
//
//    More information can be found here:
//    http://elpa.mpcdf.mpg.de/
//
//    ELPA is free software: you can redistribute it and/or modify
//    it under the terms of the version 3 of the license of the
//    GNU Lesser General Public License as published by the Free
//    Software Foundation.
//
//    ELPA is distributed in the hope that it will be useful,
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//    GNU Lesser General Public License for more details.
//
//    You should have received a copy of the GNU Lesser General Public License
//    along with ELPA. If not, see <http://www.gnu.org/licenses/>
//
//    ELPA reflects a substantial effort on the part of the original
//    ELPA consortium, and we ask you to respect the spirit of the
//    license that we chose: i.e., please contribute any changes you
//    may have back to the original ELPA library distribution, and keep
//    any derivatives of ELPA under the same license that we chose for
//    the original distribution, the GNU Lesser General Public License.
//
// Author: Andreas Marek, MPCDF, based on the double precision case of A. Heinecke
//
#include "config-f90.h"
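
/* Two-level concatenation: the CONCAT_* macros expand their arguments
   (e.g. ROW_LENGTH, SIMD_SET, BLOCK) before the CONCAT2_* macros paste
   the tokens, yielding kernel names such as
   hh_trafo_complex_kernel_6_SSE_1hv_double. */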

#define CONCAT_8ARGS(a, b, c, d, e, f, g, h) CONCAT2_8ARGS(a, b, c, d, e, f, g, h)
#define CONCAT2_8ARGS(a, b, c, d, e, f, g, h) a ## b ## c ## d ## e ## f ## g ## h

#define CONCAT_7ARGS(a, b, c, d, e, f, g) CONCAT2_7ARGS(a, b, c, d, e, f, g)
#define CONCAT2_7ARGS(a, b, c, d, e, f, g) a ## b ## c ## d ## e ## f ## g

#define CONCAT_6ARGS(a, b, c, d, e, f) CONCAT2_6ARGS(a, b, c, d, e, f)
#define CONCAT2_6ARGS(a, b, c, d, e, f) a ## b ## c ## d ## e ## f

#define CONCAT_5ARGS(a, b, c, d, e) CONCAT2_5ARGS(a, b, c, d, e)
#define CONCAT2_5ARGS(a, b, c, d, e) a ## b ## c ## d ## e

#define CONCAT_4ARGS(a, b, c, d) CONCAT2_4ARGS(a, b, c, d)
#define CONCAT2_4ARGS(a, b, c, d) a ## b ## c ## d

#define CONCAT_3ARGS(a, b, c) CONCAT2_3ARGS(a, b, c)
#define CONCAT2_3ARGS(a, b, c) a ## b ## c

//define instruction set numbers
#define SSE_128 128
#define AVX_256 256
#define AVX_512 512
#define NEON_ARCH64_128 1285

#if VEC_SET == SSE_128 || VEC_SET == AVX_256 || VEC_SET == AVX_512
#include <x86intrin.h>
#ifdef BLOCK2
#include <pmmintrin.h>
#endif

#define __forceinline __attribute__((always_inline))

#endif


#include <complex.h>

#include <stdio.h>
#include <stdlib.h>

#ifdef BLOCK2
#define PREFIX double
#define BLOCK 2
#endif

#ifdef BLOCK1
#define PREFIX single
#define BLOCK 1
#endif

#if VEC_SET == SSE_128
#define SIMD_SET SSE
#endif

#if VEC_SET == AVX_256
#define SIMD_SET AVX_AVX2
#endif

#if VEC_SET == SSE_128

#ifdef DOUBLE_PRECISION_COMPLEX
#define offset 2
#define __SIMD_DATATYPE __m128d
#define _SIMD_LOAD _mm_load_pd
#define _SIMD_LOADU _mm_loadu_pd
#define _SIMD_STORE _mm_store_pd
#define _SIMD_STOREU _mm_storeu_pd
#define _SIMD_MUL _mm_mul_pd
#define _SIMD_ADD _mm_add_pd
#define _SIMD_XOR _mm_xor_pd
#define _SIMD_ADDSUB _mm_addsub_pd
#define _SIMD_SHUFFLE _mm_shuffle_pd
#define _SHUFFLE _MM_SHUFFLE2(0,1)

#ifdef __ELPA_USE_FMA__
#define _SIMD_FMADDSUB _mm_maddsub_pd
#define _SIMD_FMSUBADD _mm_msubadd_pd
#endif
#endif /* DOUBLE_PRECISION_COMPLEX */

#ifdef SINGLE_PRECISION_COMPLEX
#define offset 4
#define __SIMD_DATATYPE __m128
#define _SIMD_LOAD _mm_load_ps
#define _SIMD_LOADU _mm_loadu_ps
#define _SIMD_STORE _mm_store_ps
#define _SIMD_STOREU _mm_storeu_ps
#define _SIMD_MUL _mm_mul_ps
#define _SIMD_ADD _mm_add_ps
#define _SIMD_XOR _mm_xor_ps
#define _SIMD_ADDSUB _mm_addsub_ps
#define _SIMD_SHUFFLE _mm_shuffle_ps
#define _SHUFFLE 0xb1

#ifdef __ELPA_USE_FMA__
#define _SIMD_FMADDSUB _mm_maddsub_ps
#define _SIMD_FMSUBADD _mm_msubadd_ps
#endif

#endif /* SINGLE_PRECISION_COMPLEX */

#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256

#ifdef DOUBLE_PRECISION_COMPLEX
#define offset 4
#define __SIMD_DATATYPE __m256d
#define _SIMD_LOAD _mm256_load_pd
#define _SIMD_LOADU _mm256_loadu_pd
#define _SIMD_STORE _mm256_store_pd
#define _SIMD_STOREU _mm256_storeu_pd
#define _SIMD_MUL _mm256_mul_pd
#define _SIMD_ADD _mm256_add_pd
#define _SIMD_XOR _mm256_xor_pd
#define _SIMD_BROADCAST _mm256_broadcast_sd
#define _SIMD_SET1 _mm256_set1_pd
#define _SIMD_ADDSUB _mm256_addsub_pd
#define _SIMD_SHUFFLE _mm256_shuffle_pd
#define _SHUFFLE 0x5
#ifdef HAVE_AVX2

#ifdef __FMA4__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_pd(a,b,c) _mm256_maddsub_pd(a,b,c)
#define _mm256_FMSUBADD_pd(a,b,c) _mm256_msubadd_pd(a,b,c)
#endif

#ifdef __AVX2__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_pd(a,b,c) _mm256_fmaddsub_pd(a,b,c)
#define _mm256_FMSUBADD_pd(a,b,c) _mm256_fmsubadd_pd(a,b,c)
#endif

#define _SIMD_FMADDSUB _mm256_FMADDSUB_pd
#define _SIMD_FMSUBADD _mm256_FMSUBADD_pd
#endif /* HAVE_AVX2 */

#endif /* DOUBLE_PRECISION_COMPLEX */

#ifdef SINGLE_PRECISION_COMPLEX
#define offset 8
#define __SIMD_DATATYPE __m256
#define _SIMD_LOAD _mm256_load_ps
#define _SIMD_LOADU _mm256_loadu_ps
#define _SIMD_STORE _mm256_store_ps
#define _SIMD_STOREU _mm256_storeu_ps
#define _SIMD_MUL _mm256_mul_ps
#define _SIMD_ADD _mm256_add_ps
#define _SIMD_XOR _mm256_xor_ps
#define _SIMD_BROADCAST  _mm256_broadcast_ss
#define _SIMD_SET1 _mm256_set1_ps
#define _SIMD_ADDSUB _mm256_addsub_ps
#define _SIMD_SHUFFLE _mm256_shuffle_ps
#define _SHUFFLE 0xb1

#ifdef HAVE_AVX2

#ifdef __FMA4__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_ps(a,b,c) _mm256_maddsub_ps(a,b,c)
#define _mm256_FMSUBADD_ps(a,b,c) _mm256_msubadd_ps(a,b,c)
#endif

#ifdef __AVX2__
#define __ELPA_USE_FMA__
#define _mm256_FMADDSUB_ps(a,b,c) _mm256_fmaddsub_ps(a,b,c)
#define _mm256_FMSUBADD_ps(a,b,c) _mm256_fmsubadd_ps(a,b,c)
#endif

#define _SIMD_FMADDSUB _mm256_FMADDSUB_ps
#define _SIMD_FMSUBADD _mm256_FMSUBADD_ps
#endif /* HAVE_AVX2 */

#endif /* SINGLE_PRECISION_COMPLEX */

#endif /* VEC_SET == AVX_256 */



#define __forceinline __attribute__((always_inline))

#ifdef HAVE_SSE_INTRINSICS
#undef __AVX__
#endif

#ifdef DOUBLE_PRECISION_COMPLEX
#define WORD_LENGTH double
#define DATA_TYPE double complex
#define DATA_TYPE_PTR double complex*
#define DATA_TYPE_REAL double
#define DATA_TYPE_REAL_PTR double*
#endif

#ifdef SINGLE_PRECISION_COMPLEX
#define WORD_LENGTH single
#define DATA_TYPE float complex
#define DATA_TYPE_PTR float complex*
#define DATA_TYPE_REAL float
#define DATA_TYPE_REAL_PTR float*
#endif


#if VEC_SET  == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 6
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 12
#endif
#endif /* VEC_SET  == SSE_128 */

#if VEC_SET  == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 12
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 24
#endif
#endif /* VEC_SET  == AVX_256 */

//Forward declaration
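// The kernels below form a ladder of fixed widths: ROW_LENGTH = 6, 5, 4,
// 3, 2, 1 SIMD vectors per iteration for double precision, and twice that
// for single precision. The driver routine steps through nq with the
// widest kernel and dispatches any remainder to a narrower one.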
static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH)(DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq 
#ifdef BLOCK1
		                       );
#endif
#ifdef BLOCK2
                                       ,int ldh, DATA_TYPE s);
#endif

#if VEC_SET  == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 5
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 10
#endif
#endif /* VEC_SET  == SSE_128 */


#if VEC_SET  == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 10
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 20
#endif
#endif /* VEC_SET  == AVX_256 */


static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH)(DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq
#ifdef BLOCK1
		                       );
#endif
#ifdef BLOCK2
                                       ,int ldh, DATA_TYPE s);
#endif


#if VEC_SET  == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 4
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 8
#endif
#endif /* VEC_SET  == SSE_128 */


#if VEC_SET  == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 8
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 16
#endif
#endif /* VEC_SET  == AVX_256 */


static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH)(DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq
#ifdef BLOCK1
		                       );
#endif
#ifdef BLOCK2
                                       ,int ldh, DATA_TYPE s);
#endif

#if VEC_SET  == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 3
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 6
#endif
#endif /* VEC_SET  == SSE_128 */


#if VEC_SET  == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 6
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 12
#endif
#endif /* VEC_SET  == AVX_256 */


static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH)(DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq
#ifdef BLOCK1
		                       );
#endif
#ifdef BLOCK2
                                       ,int ldh, DATA_TYPE s);
#endif

#if VEC_SET  == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 2
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 4
#endif
#endif /* VEC_SET  == SSE_128 */

#if VEC_SET  == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 4
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 8
#endif
#endif /* VEC_SET  == AVX_256 */


static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH)(DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq
#ifdef BLOCK1
		                       );
#endif
#ifdef BLOCK2
                                       ,int ldh, DATA_TYPE s);
#endif

#if VEC_SET  == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 1
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 2
#endif
#endif /* VEC_SET  == SSE_128 */

#if VEC_SET  == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 2
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#undef ROW_LENGTH 
#define ROW_LENGTH 4
#endif
#endif /* VEC_SET  == AVX_256 */

static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH)(DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq
#ifdef BLOCK1
		                       );
#endif
#ifdef BLOCK2
                                       ,int ldh, DATA_TYPE s);
#endif


/*
!f>#ifdef HAVE_SSE_INTRINSICS
!f> interface
!f>   subroutine single_hh_trafo_complex_SSE_1hv_double(q, hh, pnb, pnq, pldq) &
!f>                             bind(C, name="single_hh_trafo_complex_SSE_1hv_double")
!f>     use, intrinsic :: iso_c_binding
!f>     integer(kind=c_int)     :: pnb, pnq, pldq
!f>     ! complex(kind=c_double_complex)     :: q(*)
!f>     type(c_ptr), value                   :: q
!f>     complex(kind=c_double_complex)     :: hh(pnb,2)
!f>   end subroutine
!f> end interface
!f>#endif
*/

/*
!f>#ifdef HAVE_SSE_INTRINSICS
!f> interface
!f>   subroutine single_hh_trafo_complex_SSE_1hv_single(q, hh, pnb, pnq, pldq) &
!f>                             bind(C, name="single_hh_trafo_complex_SSE_1hv_single")
!f>     use, intrinsic :: iso_c_binding
!f>     integer(kind=c_int)     :: pnb, pnq, pldq
!f>     ! complex(kind=c_float_complex)   :: q(*)
!f>     type(c_ptr), value                :: q
!f>     complex(kind=c_float_complex)   :: hh(pnb,2)
!f>   end subroutine
!f> end interface
!f>#endif
*/


/*
!f>#if defined(HAVE_AVX) || defined(HAVE_AVX2)
!f> interface
!f>   subroutine single_hh_trafo_complex_AVX_AVX2_1hv_double(q, hh, pnb, pnq, pldq) &
!f>                             bind(C, name="single_hh_trafo_complex_AVX_AVX2_1hv_double")
!f>     use, intrinsic :: iso_c_binding
!f>     integer(kind=c_int)     :: pnb, pnq, pldq
!f>     ! complex(kind=c_double_complex)     :: q(*)
!f>     type(c_ptr), value                   :: q
!f>     complex(kind=c_double_complex)       :: hh(pnb,2)
!f>   end subroutine
!f> end interface
!f>#endif
*/

/*
!f>#if defined(HAVE_AVX) || defined(HAVE_AVX2)
!f> interface
!f>   subroutine single_hh_trafo_complex_AVX_AVX2_1hv_single(q, hh, pnb, pnq, pldq) &
!f>                             bind(C, name="single_hh_trafo_complex_AVX_AVX2_1hv_single")
!f>     use, intrinsic :: iso_c_binding
!f>     integer(kind=c_int)     :: pnb, pnq, pldq
!f>     ! complex(kind=c_float_complex)   :: q(*)
!f>     type(c_ptr), value              :: q
!f>     complex(kind=c_float_complex)   :: hh(pnb,2)
!f>   end subroutine
!f> end interface
!f>#endif
*/

/*
!f>#ifdef HAVE_SSE_INTRINSICS
!f> interface
!f>   subroutine double_hh_trafo_complex_SSE_2hv_double(q, hh, pnb, pnq, pldq, pldh) &
!f>                             bind(C, name="double_hh_trafo_complex_SSE_2hv_double")
!f>     use, intrinsic :: iso_c_binding
!f>     integer(kind=c_int)     :: pnb, pnq, pldq, pldh
!f>     ! complex(kind=c_double_complex)     :: q(*)
!f>     type(c_ptr), value                   :: q
!f>     complex(kind=c_double_complex)     :: hh(pnb,2)
!f>   end subroutine
!f> end interface
!f>#endif
*/

/*
!f>#ifdef HAVE_SSE_INTRINSICS
!f> interface
!f>   subroutine double_hh_trafo_complex_SSE_2hv_single(q, hh, pnb, pnq, pldq, pldh) &
!f>                             bind(C, name="double_hh_trafo_complex_SSE_2hv_single")
!f>     use, intrinsic :: iso_c_binding
!f>     integer(kind=c_int)     :: pnb, pnq, pldq, pldh
!f>     ! complex(kind=c_float_complex)   :: q(*)
!f>     type(c_ptr), value                :: q
!f>     complex(kind=c_float_complex)   :: hh(pnb,2)
!f>   end subroutine
!f> end interface
!f>#endif
*/

void CONCAT_7ARGS(PREFIX,_hh_trafo_complex_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int* pnb, int* pnq, int* pldq
#ifdef BLOCK1
		  )
#endif
#ifdef BLOCK2
                  ,int* pldh)
#endif
{

     int i, worked_on;
     int nb = *pnb;
     int nq = *pldq;
     int ldq = *pldq;
#ifdef BLOCK2
     int ldh = *pldh;
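
     /* s is the scalar product of the two Householder vectors, shifted by
        one row; the first term is written out because the leading element
        of the first vector is implicitly 1. It couples the two reflectors
        in the rank-2 (BLOCK2) update. */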

     DATA_TYPE s = conj(hh[(ldh)+1])*1.0;

     for (i = 2; i < nb; i++)
     {
             s += hh[i-1] * conj(hh[(i+ldh)]);
     }
#endif

     worked_on = 0;

#ifdef BLOCK1

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 6
#define STEP_SIZE 6
#define UPPER_BOUND 5
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 12
#define STEP_SIZE 12
#define UPPER_BOUND 10
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 12
#define STEP_SIZE 12
#define UPPER_BOUND 10
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 24
#define STEP_SIZE 24
#define UPPER_BOUND 20
#endif
#endif /* VEC_SET == AVX_256 */
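
        /* UPPER_BOUND = STEP_SIZE - 1, so this loop only takes full
           STEP_SIZE chunks of columns; remainder columns fall through to
           the narrower kernels below. */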

        for (i = 0; i < nq - UPPER_BOUND; i+= STEP_SIZE)
        {

            CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq);
	    worked_on += ROW_LENGTH;
        }
        if (nq == i) {
          return;
        }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 5
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 10
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 10
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 20
#endif
#endif /* VEC_SET == AVX_256 */

        if (nq-i == ROW_LENGTH)
        {
            CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq);
	    worked_on += ROW_LENGTH;
        }


#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 8
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 8
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 16
#endif
#endif /* VEC_SET == AVX_256 */

        if (nq-i == ROW_LENGTH)
        {
            CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq);
	    worked_on += ROW_LENGTH;
        }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 3
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 6
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 6
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 12
#endif
#endif /* VEC_SET == AVX_256 */

        if (nq-i == ROW_LENGTH)
        {
            CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq);
	    worked_on += ROW_LENGTH;
        }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 2
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 8
#endif
#endif /* VEC_SET == AVX_256 */


        if (nq-i == ROW_LENGTH)
        {
            CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq);
	    worked_on += ROW_LENGTH;
        }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 1
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 2
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 2
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#endif /* VEC_SET == AVX_256 */
        if (nq-i == ROW_LENGTH)
        {
            CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq);
	    worked_on += ROW_LENGTH;
        }

#endif /* BLOCK1 */

#ifdef BLOCK2

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#define STEP_SIZE 4
#define UPPER_BOUND 3
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 8
#define STEP_SIZE 8
#define UPPER_BOUND 6
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 8
#define STEP_SIZE 8
#define UPPER_BOUND 6
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 16
#define STEP_SIZE 16
#define UPPER_BOUND 12
#endif
#endif /* VEC_SET == AVX_256 */
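
    /* Same scheme as in the BLOCK1 branch: full STEP_SIZE chunks here,
       with the remainder handled by the narrower kernels below. */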

    for (i = 0; i < nq - UPPER_BOUND; i+=STEP_SIZE)
    {
         CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq, ldh, s);
	 worked_on +=ROW_LENGTH;
    }
 
    if (nq == i)
    {
      return;
    }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 3
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 6
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 6
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 12
#endif
#endif /* VEC_SET == AVX_256 */

    if (nq-i == ROW_LENGTH)
    {
        CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq, ldh, s);
        worked_on += ROW_LENGTH;
    }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 2
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 8
#endif
#endif /* VEC_SET == AVX_256 */

    if (nq-i == ROW_LENGTH)
    {
        CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq, ldh, s);
        worked_on += ROW_LENGTH;
    }

#if VEC_SET == SSE_128
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 1
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 2
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#undef ROW_LENGTH
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 2
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 4
#endif
#endif /* VEC_SET == AVX_256 */

    if (nq-i == ROW_LENGTH)
    {
        CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (&q[i], hh, nb, ldq, ldh, s);
        worked_on += ROW_LENGTH;
    }
#endif /* BLOCK2 */

//#ifdef WITH_DEBUG
    if (worked_on != nq)
    {
      printf("Error in complex SIMD_SET BLOCK BLOCK kernel %d %d\n", worked_on, nq);
      abort();
    }
//#endif


}

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 6
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 12
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
#define ROW_LENGTH 12
#endif
#ifdef SINGLE_PRECISION_COMPLEX
#define ROW_LENGTH 24
#endif
#endif /* VEC_SET == AVX_256 */

static __forceinline void CONCAT_8ARGS(hh_trafo_complex_kernel_,ROW_LENGTH,_,SIMD_SET,_,BLOCK,hv_,WORD_LENGTH) (DATA_TYPE_PTR q, DATA_TYPE_PTR hh, int nb, int ldq
#ifdef BLOCK1
		)
#endif
#ifdef BLOCK2
                ,int ldh, DATA_TYPE s)
#endif
{

    DATA_TYPE_REAL_PTR q_dbl = (DATA_TYPE_REAL_PTR)q;
    DATA_TYPE_REAL_PTR hh_dbl = (DATA_TYPE_REAL_PTR)hh;
#ifdef BLOCK2
    DATA_TYPE_REAL_PTR s_dbl = (DATA_TYPE_REAL_PTR)(&s);
#endif

    __SIMD_DATATYPE x1, x2, x3, x4, x5, x6;
    __SIMD_DATATYPE q1, q2, q3, q4, q5, q6;
#ifdef BLOCK2
    __SIMD_DATATYPE y1, y2, y3, y4, y5, y6;
    __SIMD_DATATYPE h2_real, h2_imag;
#endif
    __SIMD_DATATYPE h1_real, h1_imag;
    __SIMD_DATATYPE tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
    int i=0;
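
    /* Mask with only the IEEE sign bit set in every lane: XOR-ing with it
       negates all elements. It is used below to conjugate the Householder
       coefficients and to fold the minus sign of the update into tau. */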

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
    __SIMD_DATATYPE sign = (__SIMD_DATATYPE)_mm_set_epi64x(0x8000000000000000, 0x8000000000000000);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
    __SIMD_DATATYPE sign = (__SIMD_DATATYPE)_mm_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000);
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
    __SIMD_DATATYPE sign = (__SIMD_DATATYPE)_mm256_set_epi64x(0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
    __SIMD_DATATYPE sign = (__SIMD_DATATYPE)_mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
#endif
#endif /* VEC_SET == AVX_256 */

#ifdef BLOCK2
     x1 = _SIMD_LOAD(&q_dbl[(2*ldq)+0]);
     x2 = _SIMD_LOAD(&q_dbl[(2*ldq)+offset]);
     x3 = _SIMD_LOAD(&q_dbl[(2*ldq)+2*offset]);
     x4 = _SIMD_LOAD(&q_dbl[(2*ldq)+3*offset]);
     x5 = _SIMD_LOAD(&q_dbl[(2*ldq)+4*offset]);
     x6 = _SIMD_LOAD(&q_dbl[(2*ldq)+5*offset]);

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
     h2_real = _mm_loaddup_pd(&hh_dbl[(ldh+1)*2]);
     h2_imag = _mm_loaddup_pd(&hh_dbl[((ldh+1)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     h2_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(ldh+1)*2]) )));
     h2_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((ldh+1)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
     h2_real = _SIMD_BROADCAST(&hh_dbl[(ldh+1)*2]);
     h2_imag = _SIMD_BROADCAST(&hh_dbl[((ldh+1)*2)+1]);
#endif /* VEC_SET == AVX_256 */

#ifndef __ELPA_USE_FMA__
     // conjugate
     h2_imag = _SIMD_XOR(h2_imag, sign);
#endif

     y1 = _SIMD_LOAD(&q_dbl[0]);
     y2 = _SIMD_LOAD(&q_dbl[offset]);
     y3 = _SIMD_LOAD(&q_dbl[2*offset]);
     y4 = _SIMD_LOAD(&q_dbl[3*offset]);
     y5 = _SIMD_LOAD(&q_dbl[4*offset]);
     y6 = _SIMD_LOAD(&q_dbl[5*offset]);
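
     /* Complex multiply-accumulate idiom used throughout this file: with
        (re,im) pairs interleaved in the lanes, tmp = h_imag*x, the shuffle
        swaps re and im within every pair, and FMSUBADD(h_real, x, tmp)
        yields (hr*re + hi*im, hr*im - hi*re), i.e. conj(h)*x per pair.
        The non-FMA fallback negates h_imag first (the XOR above) and uses
        ADDSUB instead. */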

     tmp1 = _SIMD_MUL(h2_imag, x1);
#ifdef __ELPA_USE_FMA__
     y1 = _SIMD_ADD(y1, _SIMD_FMSUBADD(h2_real, x1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
     y1 = _SIMD_ADD(y1, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
     tmp2 = _SIMD_MUL(h2_imag, x2);
#ifdef __ELPA_USE_FMA__
     y2 = _SIMD_ADD(y2, _SIMD_FMSUBADD(h2_real, x2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
     y2 = _SIMD_ADD(y2, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif

     tmp3 = _SIMD_MUL(h2_imag, x3);
#ifdef __ELPA_USE_FMA__
     y3 = _SIMD_ADD(y3, _SIMD_FMSUBADD(h2_real, x3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
     y3 = _SIMD_ADD(y3, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif
     tmp4 = _SIMD_MUL(h2_imag, x4);
#ifdef __ELPA_USE_FMA__
     y4 = _SIMD_ADD(y4, _SIMD_FMSUBADD(h2_real, x4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
     y4 = _SIMD_ADD(y4, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif

     tmp5 = _SIMD_MUL(h2_imag, x5);
#ifdef __ELPA_USE_FMA__
     y5 = _SIMD_ADD(y5, _SIMD_FMSUBADD(h2_real, x5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
     y5 = _SIMD_ADD(y5, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
     tmp6 = _SIMD_MUL(h2_imag, x6);
#ifdef __ELPA_USE_FMA__
     y6 = _SIMD_ADD(y6, _SIMD_FMSUBADD(h2_real, x6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
     y6 = _SIMD_ADD(y6, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

#endif /* BLOCK2 */

#ifdef BLOCK1
    x1 = _SIMD_LOAD(&q_dbl[0]);
    x2 = _SIMD_LOAD(&q_dbl[offset]);
    x3 = _SIMD_LOAD(&q_dbl[2*offset]);
    x4 = _SIMD_LOAD(&q_dbl[3*offset]);
    x5 = _SIMD_LOAD(&q_dbl[4*offset]);
    x6 = _SIMD_LOAD(&q_dbl[5*offset]);
#endif

    for (i = BLOCK; i < nb; i++)
    {

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
        h1_real = _mm_loaddup_pd(&hh_dbl[(i-BLOCK+1)*2]);
        h1_imag = _mm_loaddup_pd(&hh_dbl[((i-BLOCK+1)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
        h1_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(i-BLOCK+1)*2]) )));
        h1_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((i-BLOCK+1)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
       h1_real = _SIMD_BROADCAST(&hh_dbl[(i-BLOCK+1)*2]);
       h1_imag = _SIMD_BROADCAST(&hh_dbl[((i-BLOCK+1)*2)+1]);
#endif /* VEC_SET == AVX_256 */

#ifndef __ELPA_USE_FMA__
        // conjugate
        h1_imag = _SIMD_XOR(h1_imag, sign);
#endif

        q1 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+0]);
        q2 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+offset]);
        q3 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+2*offset]);
        q4 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+3*offset]);
        q5 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+4*offset]);
        q6 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+5*offset]);

        tmp1 = _SIMD_MUL(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
        x1 = _SIMD_ADD(x1, _SIMD_FMSUBADD(h1_real, q1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
        x1 = _SIMD_ADD(x1, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
        tmp2 = _SIMD_MUL(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
        x2 = _SIMD_ADD(x2, _SIMD_FMSUBADD(h1_real, q2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
        x2 = _SIMD_ADD(x2, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif
        tmp3 = _SIMD_MUL(h1_imag, q3);
#ifdef __ELPA_USE_FMA__
        x3 = _SIMD_ADD(x3, _SIMD_FMSUBADD(h1_real, q3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
        x3 = _SIMD_ADD(x3, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif

        tmp4 = _SIMD_MUL(h1_imag, q4);
#ifdef __ELPA_USE_FMA__
        x4 = _SIMD_ADD(x4, _SIMD_FMSUBADD(h1_real, q4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
        x4 = _SIMD_ADD(x4, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif
        tmp5 = _SIMD_MUL(h1_imag, q5);
#ifdef __ELPA_USE_FMA__
        x5 = _SIMD_ADD(x5, _SIMD_FMSUBADD(h1_real, q5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
        x5 = _SIMD_ADD(x5, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
        tmp6 = _SIMD_MUL(h1_imag, q6);
#ifdef __ELPA_USE_FMA__
        x6 = _SIMD_ADD(x6, _SIMD_FMSUBADD(h1_real, q6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
        x6 = _SIMD_ADD(x6, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

#ifdef BLOCK2

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
          h2_real = _mm_loaddup_pd(&hh_dbl[(ldh+i)*2]);
          h2_imag = _mm_loaddup_pd(&hh_dbl[((ldh+i)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
          h2_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(ldh+i)*2]) )));
          h2_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((ldh+i)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
          h2_real = _SIMD_BROADCAST(&hh_dbl[(ldh+i)*2]);
          h2_imag = _SIMD_BROADCAST(&hh_dbl[((ldh+i)*2)+1]);
#endif /* VEC_SET == AVX_256 */

#ifndef __ELPA_USE_FMA__
          // conjugate
          h2_imag = _SIMD_XOR(h2_imag, sign);
#endif

          tmp1 = _SIMD_MUL(h2_imag, q1);
#ifdef __ELPA_USE_FMA__
          y1 = _SIMD_ADD(y1, _SIMD_FMSUBADD(h2_real, q1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
          y1 = _SIMD_ADD(y1, _SIMD_ADDSUB( _SIMD_MUL(h2_real, q1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
          tmp2 = _SIMD_MUL(h2_imag, q2);
#ifdef __ELPA_USE_FMA__
          y2 = _SIMD_ADD(y2, _SIMD_FMSUBADD(h2_real, q2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
          y2 = _SIMD_ADD(y2, _SIMD_ADDSUB( _SIMD_MUL(h2_real, q2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif

          tmp3 = _SIMD_MUL(h2_imag, q3);
#ifdef __ELPA_USE_FMA__
          y3 = _SIMD_ADD(y3, _SIMD_FMSUBADD(h2_real, q3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
          y3 = _SIMD_ADD(y3, _SIMD_ADDSUB( _SIMD_MUL(h2_real, q3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif
          tmp4 = _SIMD_MUL(h2_imag, q4);
#ifdef __ELPA_USE_FMA__
          y4 = _SIMD_ADD(y4, _SIMD_FMSUBADD(h2_real, q4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
          y4 = _SIMD_ADD(y4, _SIMD_ADDSUB( _SIMD_MUL(h2_real, q4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif

          tmp5 = _SIMD_MUL(h2_imag, q5);
#ifdef __ELPA_USE_FMA__
          y5 = _SIMD_ADD(y5, _SIMD_FMSUBADD(h2_real, q5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
          y5 = _SIMD_ADD(y5, _SIMD_ADDSUB( _SIMD_MUL(h2_real, q5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
          tmp6 = _SIMD_MUL(h2_imag, q6);
#ifdef __ELPA_USE_FMA__
          y6 = _SIMD_ADD(y6, _SIMD_FMSUBADD(h2_real, q6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
          y6 = _SIMD_ADD(y6, _SIMD_ADDSUB( _SIMD_MUL(h2_real, q6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif
	
#endif /* BLOCK2 */

    }

#ifdef BLOCK2

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
     h1_real = _mm_loaddup_pd(&hh_dbl[(nb-1)*2]);
     h1_imag = _mm_loaddup_pd(&hh_dbl[((nb-1)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     h1_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(nb-1)*2]) )));
     h1_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((nb-1)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
     h1_real = _SIMD_BROADCAST(&hh_dbl[(nb-1)*2]);
     h1_imag = _SIMD_BROADCAST(&hh_dbl[((nb-1)*2)+1]);
#endif /* VEC_SET == AVX_256 */

#ifndef __ELPA_USE_FMA__
     // conjugate
     h1_imag = _SIMD_XOR(h1_imag, sign);
#endif

     q1 = _SIMD_LOAD(&q_dbl[(2*nb*ldq)+0]);
     q2 = _SIMD_LOAD(&q_dbl[(2*nb*ldq)+offset]);
     q3 = _SIMD_LOAD(&q_dbl[(2*nb*ldq)+2*offset]);
     q4 = _SIMD_LOAD(&q_dbl[(2*nb*ldq)+3*offset]);
     q5 = _SIMD_LOAD(&q_dbl[(2*nb*ldq)+4*offset]);
     q6 = _SIMD_LOAD(&q_dbl[(2*nb*ldq)+5*offset]);

     tmp1 = _SIMD_MUL(h1_imag, q1);
#ifdef __ELPA_USE_FMA__
     x1 = _SIMD_ADD(x1, _SIMD_FMSUBADD(h1_real, q1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
     x1 = _SIMD_ADD(x1, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
     tmp2 = _SIMD_MUL(h1_imag, q2);
#ifdef __ELPA_USE_FMA__
     x2 = _SIMD_ADD(x2, _SIMD_FMSUBADD(h1_real, q2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
     x2 = _SIMD_ADD(x2, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif

     tmp3 = _SIMD_MUL(h1_imag, q3);
#ifdef __ELPA_USE_FMA__
     x3 = _SIMD_ADD(x3, _SIMD_FMSUBADD(h1_real, q3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
     x3 = _SIMD_ADD(x3, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif
     tmp4 = _SIMD_MUL(h1_imag, q4);
#ifdef __ELPA_USE_FMA__
     x4 = _SIMD_ADD(x4, _SIMD_FMSUBADD(h1_real, q4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
     x4 = _SIMD_ADD(x4, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif

     tmp5 = _SIMD_MUL(h1_imag, q5);
#ifdef __ELPA_USE_FMA__
     x5 = _SIMD_ADD(x5, _SIMD_FMSUBADD(h1_real, q5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
     x5 = _SIMD_ADD(x5, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
     tmp6 = _SIMD_MUL(h1_imag, q6);
#ifdef __ELPA_USE_FMA__
     x6 = _SIMD_ADD(x6, _SIMD_FMSUBADD(h1_real, q6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
     x6 = _SIMD_ADD(x6, _SIMD_ADDSUB( _SIMD_MUL(h1_real, q6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

#endif /* BLOCK2 */

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
    h1_real = _mm_loaddup_pd(&hh_dbl[0]);
    h1_imag = _mm_loaddup_pd(&hh_dbl[1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
    h1_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[0]) )));
    h1_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[1]) )));
#endif
#endif /*  VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
    h1_real = _SIMD_BROADCAST(&hh_dbl[0]);
    h1_imag = _SIMD_BROADCAST(&hh_dbl[1]);
#endif /* VEC_SET == AVX_256 */
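
    /* hh[0] carries tau (the leading element of the Householder vector is
       implicitly 1); negating real and imaginary parts folds the minus
       sign of the update q -> q - tau * v * (v^H q) into h1. */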

    h1_real = _SIMD_XOR(h1_real, sign);
    h1_imag = _SIMD_XOR(h1_imag, sign);

    tmp1 = _SIMD_MUL(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
    x1 = _SIMD_FMADDSUB(h1_real, x1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE));
#else
    x1 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, x1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE));
#endif
    tmp2 = _SIMD_MUL(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
    x2 = _SIMD_FMADDSUB(h1_real, x2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE));
#else
    x2 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, x2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE));
#endif
    tmp3 = _SIMD_MUL(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
    x3 = _SIMD_FMADDSUB(h1_real, x3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE));
#else
    x3 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, x3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE));
#endif

    tmp4 = _SIMD_MUL(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
    x4 = _SIMD_FMADDSUB(h1_real, x4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE));
#else
    x4 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, x4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE));
#endif
    tmp5 = _SIMD_MUL(h1_imag, x5);
#ifdef __ELPA_USE_FMA__
    x5 = _SIMD_FMADDSUB(h1_real, x5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE));
#else
    x5 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, x5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE));
#endif
    tmp6 = _SIMD_MUL(h1_imag, x6);
#ifdef __ELPA_USE_FMA__
    x6 = _SIMD_FMADDSUB(h1_real, x6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE));
#else
    x6 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, x6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE));
#endif

#ifdef BLOCK2

#if VEC_SET == SSE_128    
#ifdef DOUBLE_PRECISION_COMPLEX
     h1_real = _mm_loaddup_pd(&hh_dbl[ldh*2]);
     h1_imag = _mm_loaddup_pd(&hh_dbl[(ldh*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     h1_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[ldh*2]) )));
     h1_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(ldh*2)+1]) )));
#endif

#ifdef DOUBLE_PRECISION_COMPLEX
     h2_real = _mm_loaddup_pd(&hh_dbl[ldh*2]);
     h2_imag = _mm_loaddup_pd(&hh_dbl[(ldh*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     h2_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[ldh*2]) )));
     h2_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(ldh*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
     h1_real = _SIMD_BROADCAST(&hh_dbl[ldh*2]);
     h1_imag = _SIMD_BROADCAST(&hh_dbl[(ldh*2)+1]);
     h2_real = _SIMD_BROADCAST(&hh_dbl[ldh*2]);
     h2_imag = _SIMD_BROADCAST(&hh_dbl[(ldh*2)+1]);
#endif /* VEC_SET == AVX_256 */

     h1_real = _SIMD_XOR(h1_real, sign);
     h1_imag = _SIMD_XOR(h1_imag, sign);
     h2_real = _SIMD_XOR(h2_real, sign);
     h2_imag = _SIMD_XOR(h2_imag, sign);

#if VEC_SET == SSE_128
#ifdef SINGLE_PRECISION_COMPLEX
     tmp2 = _mm_castpd_ps(_mm_load_pd1((double *) s_dbl));
#else
     tmp2 = _SIMD_LOADU(s_dbl);
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
#ifdef DOUBLE_PRECISION_COMPLEX
     tmp2 = _mm256_set_pd(s_dbl[1], s_dbl[0], s_dbl[1], s_dbl[0]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     tmp2 = _mm256_set_ps(s_dbl[1], s_dbl[0], s_dbl[1], s_dbl[0],
                             s_dbl[1], s_dbl[0], s_dbl[1], s_dbl[0]);
#endif
#endif /* VEC_SET == AVX_256 */
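
     /* Scale the scalar product s by the (already negated) second tau and
        broadcast its real and imaginary parts for the coupled update of y
        below. */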

     tmp1 = _SIMD_MUL(h2_imag, tmp2);
#ifdef __ELPA_USE_FMA__
     tmp2 = _SIMD_FMSUBADD(h2_real, tmp2, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE));
#else
     tmp2 = _SIMD_ADDSUB( _SIMD_MUL(h2_real, tmp2), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE));
#endif

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
     h2_real = _mm_movedup_pd(tmp2);
     h2_imag = _mm_set1_pd(tmp2[1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     h2_real = _mm_moveldup_ps(tmp2);
     h2_imag = _mm_movehdup_ps(tmp2);
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
     h2_real = _SIMD_SET1(tmp2[0]);
     h2_imag = _SIMD_SET1(tmp2[1]);
#endif /* VEC_SET == AVX_256 */

     tmp1 = _SIMD_MUL(h1_imag, y1);
#ifdef __ELPA_USE_FMA__
     y1 = _SIMD_FMSUBADD(h1_real, y1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE));
#else
     y1 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, y1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE));
#endif
     tmp2 = _SIMD_MUL(h1_imag, y2);
#ifdef __ELPA_USE_FMA__
     y2 = _SIMD_FMSUBADD(h1_real, y2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE));
#else
     y2 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, y2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE));
#endif

     tmp3 = _SIMD_MUL(h1_imag, y3);
#ifdef __ELPA_USE_FMA__
     y3 = _SIMD_FMSUBADD(h1_real, y3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE));
#else
     y3 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, y3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE));
#endif
     tmp4 = _SIMD_MUL(h1_imag, y4);
#ifdef __ELPA_USE_FMA__
     y4 = _SIMD_FMSUBADD(h1_real, y4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE));
#else
     y4 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, y4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE));
#endif

     tmp5 = _SIMD_MUL(h1_imag, y5);
#ifdef __ELPA_USE_FMA__
     y5 = _SIMD_FMSUBADD(h1_real, y5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE));
#else
     y5 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, y5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE));
#endif
     tmp6 = _SIMD_MUL(h1_imag, y6);
#ifdef __ELPA_USE_FMA__
     y6 = _SIMD_FMSUBADD(h1_real, y6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE));
#else
     y6 = _SIMD_ADDSUB( _SIMD_MUL(h1_real, y6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE));
#endif

     tmp1 = _SIMD_MUL(h2_imag, x1);
#ifdef __ELPA_USE_FMA__
     y1 = _SIMD_ADD(y1, _SIMD_FMSUBADD(h2_real, x1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
     y1 = _SIMD_ADD(y1, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
     tmp2 = _SIMD_MUL(h2_imag, x2);
#ifdef __ELPA_USE_FMA__
     y2 = _SIMD_ADD(y2, _SIMD_FMSUBADD(h2_real, x2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
     y2 = _SIMD_ADD(y2, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif

     tmp3 = _SIMD_MUL(h2_imag, x3);
#ifdef __ELPA_USE_FMA__
     y3 = _SIMD_ADD(y3, _SIMD_FMSUBADD(h2_real, x3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
     y3 = _SIMD_ADD(y3, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif
     tmp4 = _SIMD_MUL(h2_imag, x4);
#ifdef __ELPA_USE_FMA__
     y4 = _SIMD_ADD(y4, _SIMD_FMSUBADD(h2_real, x4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
     y4 = _SIMD_ADD(y4, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif

     tmp5 = _SIMD_MUL(h2_imag, x5);
#ifdef __ELPA_USE_FMA__
     y5 = _SIMD_ADD(y5, _SIMD_FMSUBADD(h2_real, x5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
     y5 = _SIMD_ADD(y5, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
     tmp6 = _SIMD_MUL(h2_imag, x6);
#ifdef __ELPA_USE_FMA__
     y6 = _SIMD_ADD(y6, _SIMD_FMSUBADD(h2_real, x6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
     y6 = _SIMD_ADD(y6, _SIMD_ADDSUB( _SIMD_MUL(h2_real, x6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

#endif /* BLOCK2 */
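
    /* Update the first block column of q: BLOCK1 adds x directly (tau is
       already folded in), BLOCK2 adds the coupled vector y. */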

    q1 = _SIMD_LOAD(&q_dbl[0]);
    q2 = _SIMD_LOAD(&q_dbl[offset]);
    q3 = _SIMD_LOAD(&q_dbl[2*offset]);
    q4 = _SIMD_LOAD(&q_dbl[3*offset]);
    q5 = _SIMD_LOAD(&q_dbl[4*offset]);
    q6 = _SIMD_LOAD(&q_dbl[5*offset]);

#ifdef BLOCK1
    q1 = _SIMD_ADD(q1, x1);
    q2 = _SIMD_ADD(q2, x2);
    q3 = _SIMD_ADD(q3, x3);
    q4 = _SIMD_ADD(q4, x4);
    q5 = _SIMD_ADD(q5, x5);
    q6 = _SIMD_ADD(q6, x6);
#endif


#ifdef BLOCK2
    q1 = _SIMD_ADD(q1, y1);
    q2 = _SIMD_ADD(q2, y2);
    q3 = _SIMD_ADD(q3, y3);
    q4 = _SIMD_ADD(q4, y4);
    q5 = _SIMD_ADD(q5, y5);
    q6 = _SIMD_ADD(q6, y6);
#endif

    _SIMD_STORE(&q_dbl[0], q1);
    _SIMD_STORE(&q_dbl[offset], q2);
    _SIMD_STORE(&q_dbl[2*offset], q3);
    _SIMD_STORE(&q_dbl[3*offset], q4);
    _SIMD_STORE(&q_dbl[4*offset], q5);
    _SIMD_STORE(&q_dbl[5*offset], q6);


#ifdef BLOCK2

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
     h2_real = _mm_loaddup_pd(&hh_dbl[(ldh+1)*2]);
     h2_imag = _mm_loaddup_pd(&hh_dbl[((ldh+1)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
     h2_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(ldh+1)*2]) )));
     h2_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((ldh+1)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
     h2_real = _SIMD_BROADCAST(&hh_dbl[(ldh+1)*2]);
     h2_imag = _SIMD_BROADCAST(&hh_dbl[((ldh+1)*2)+1]);
#endif /* VEC_SET == AVX_256 */

     q1 = _SIMD_LOAD(&q_dbl[(ldq*2)+0]);
     q2 = _SIMD_LOAD(&q_dbl[(ldq*2)+offset]);
     q3 = _SIMD_LOAD(&q_dbl[(ldq*2)+2*offset]);
     q4 = _SIMD_LOAD(&q_dbl[(ldq*2)+3*offset]);
     q5 = _SIMD_LOAD(&q_dbl[(ldq*2)+4*offset]);
     q6 = _SIMD_LOAD(&q_dbl[(ldq*2)+5*offset]);

     q1 = _SIMD_ADD(q1, x1);
     q2 = _SIMD_ADD(q2, x2);
     q3 = _SIMD_ADD(q3, x3);
     q4 = _SIMD_ADD(q4, x4);
     q5 = _SIMD_ADD(q5, x5);
     q6 = _SIMD_ADD(q6, x6);

     tmp1 = _SIMD_MUL(h2_imag, y1);
#ifdef __ELPA_USE_FMA__
     q1 = _SIMD_ADD(q1, _SIMD_FMSUBADD(h2_real, y1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
     q1 = _SIMD_ADD(q1, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
     tmp2 = _SIMD_MUL(h2_imag, y2);
#ifdef __ELPA_USE_FMA__
     q2 = _SIMD_ADD(q2, _SIMD_FMSUBADD(h2_real, y2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
     q2 = _SIMD_ADD(q2, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif

     tmp3 = _SIMD_MUL(h2_imag, y3);
#ifdef __ELPA_USE_FMA__
     q3 = _SIMD_ADD(q3, _SIMD_FMSUBADD(h2_real, y3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
     q3 = _SIMD_ADD(q3, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif
     tmp4 = _SIMD_MUL(h2_imag, y4);
#ifdef __ELPA_USE_FMA__
     q4 = _SIMD_ADD(q4, _SIMD_FMSUBADD(h2_real, y4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
     q4 = _SIMD_ADD(q4, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif

     tmp5 = _SIMD_MUL(h2_imag, y5);
#ifdef __ELPA_USE_FMA__
     q5 = _SIMD_ADD(q5, _SIMD_FMSUBADD(h2_real, y5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
     q5 = _SIMD_ADD(q5, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
     tmp6 = _SIMD_MUL(h2_imag, y6);
#ifdef __ELPA_USE_FMA__
     q6 = _SIMD_ADD(q6, _SIMD_FMSUBADD(h2_real, y6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
     q6 = _SIMD_ADD(q6, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

     _SIMD_STORE(&q_dbl[(ldq*2)+0], q1);
     _SIMD_STORE(&q_dbl[(ldq*2)+offset], q2);
     _SIMD_STORE(&q_dbl[(ldq*2)+2*offset], q3);
     _SIMD_STORE(&q_dbl[(ldq*2)+3*offset], q4);
     _SIMD_STORE(&q_dbl[(ldq*2)+4*offset], q5);
     _SIMD_STORE(&q_dbl[(ldq*2)+5*offset], q6);

#endif /* BLOCK2 */


    for (i = BLOCK; i < nb; i++)
    {

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
        h1_real = _mm_loaddup_pd(&hh_dbl[(i-BLOCK+1)*2]);
        h1_imag = _mm_loaddup_pd(&hh_dbl[((i-BLOCK+1)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
        h1_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(i-BLOCK+1)*2]) )));
        h1_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((i-BLOCK+1)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
	h1_real = _SIMD_BROADCAST(&hh_dbl[(i-BLOCK+1)*2]);
        h1_imag = _SIMD_BROADCAST(&hh_dbl[((i-BLOCK+1)*2)+1]);
#endif /* VEC_SET == AVX_256 */

        q1 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+0]);
        q2 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+offset]);
        q3 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+2*offset]);
        q4 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+3*offset]);
        q5 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+4*offset]);
        q6 = _SIMD_LOAD(&q_dbl[(2*i*ldq)+5*offset]);

        tmp1 = _SIMD_MUL(h1_imag, x1);
#ifdef __ELPA_USE_FMA__
        q1 = _SIMD_ADD(q1, _SIMD_FMADDSUB(h1_real, x1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
        q1 = _SIMD_ADD(q1, _SIMD_ADDSUB( _SIMD_MUL(h1_real, x1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
        tmp2 = _SIMD_MUL(h1_imag, x2);
#ifdef __ELPA_USE_FMA__
        q2 = _SIMD_ADD(q2, _SIMD_FMADDSUB(h1_real, x2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
        q2 = _SIMD_ADD(q2, _SIMD_ADDSUB( _SIMD_MUL(h1_real, x2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif
        tmp3 = _SIMD_MUL(h1_imag, x3);
#ifdef __ELPA_USE_FMA__
        q3 = _SIMD_ADD(q3, _SIMD_FMADDSUB(h1_real, x3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
        q3 = _SIMD_ADD(q3, _SIMD_ADDSUB( _SIMD_MUL(h1_real, x3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif

         tmp4 = _SIMD_MUL(h1_imag, x4);
#ifdef __ELPA_USE_FMA__
         q4 = _SIMD_ADD(q4, _SIMD_FMADDSUB(h1_real, x4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
         q4 = _SIMD_ADD(q4, _SIMD_ADDSUB( _SIMD_MUL(h1_real, x4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif
         tmp5 = _SIMD_MUL(h1_imag, x5);
#ifdef __ELPA_USE_FMA__
         q5 = _SIMD_ADD(q5, _SIMD_FMADDSUB(h1_real, x5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
         q5 = _SIMD_ADD(q5, _SIMD_ADDSUB( _SIMD_MUL(h1_real, x5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
         tmp6 = _SIMD_MUL(h1_imag, x6);
#ifdef __ELPA_USE_FMA__
         q6 = _SIMD_ADD(q6, _SIMD_FMADDSUB(h1_real, x6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
         q6 = _SIMD_ADD(q6, _SIMD_ADDSUB( _SIMD_MUL(h1_real, x6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

#ifdef BLOCK2

#if VEC_SET == SSE_128
#ifdef DOUBLE_PRECISION_COMPLEX
          h2_real = _mm_loaddup_pd(&hh_dbl[(ldh+i)*2]);
          h2_imag = _mm_loaddup_pd(&hh_dbl[((ldh+i)*2)+1]);
#endif
#ifdef SINGLE_PRECISION_COMPLEX
          h2_real = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[(ldh+i)*2]) )));
          h2_imag = _mm_moveldup_ps(_mm_castpd_ps(_mm_loaddup_pd( (double *)(&hh_dbl[((ldh+i)*2)+1]) )));
#endif
#endif /* VEC_SET == SSE_128 */

#if VEC_SET == AVX_256
	  h2_real = _SIMD_BROADCAST(&hh_dbl[(ldh+i)*2]);
          h2_imag = _SIMD_BROADCAST(&hh_dbl[((ldh+i)*2)+1]);
#endif /* VEC_SET == AVX_256 */

          tmp1 = _SIMD_MUL(h2_imag, y1);
#ifdef __ELPA_USE_FMA__
          q1 = _SIMD_ADD(q1, _SIMD_FMSUBADD(h2_real, y1, _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#else
          q1 = _SIMD_ADD(q1, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y1), _SIMD_SHUFFLE(tmp1, tmp1, _SHUFFLE)));
#endif
          tmp2 = _SIMD_MUL(h2_imag, y2);
#ifdef __ELPA_USE_FMA__
          q2 = _SIMD_ADD(q2, _SIMD_FMSUBADD(h2_real, y2, _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#else
          q2 = _SIMD_ADD(q2, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y2), _SIMD_SHUFFLE(tmp2, tmp2, _SHUFFLE)));
#endif

          tmp3 = _SIMD_MUL(h2_imag, y3);
#ifdef __ELPA_USE_FMA__
          q3 = _SIMD_ADD(q3, _SIMD_FMSUBADD(h2_real, y3, _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#else
          q3 = _SIMD_ADD(q3, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y3), _SIMD_SHUFFLE(tmp3, tmp3, _SHUFFLE)));
#endif
          tmp4 = _SIMD_MUL(h2_imag, y4);
#ifdef __ELPA_USE_FMA__
          q4 = _SIMD_ADD(q4, _SIMD_FMSUBADD(h2_real, y4, _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#else
          q4 = _SIMD_ADD(q4, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y4), _SIMD_SHUFFLE(tmp4, tmp4, _SHUFFLE)));
#endif

          tmp5 = _SIMD_MUL(h2_imag, y5);
#ifdef __ELPA_USE_FMA__
          q5 = _SIMD_ADD(q5, _SIMD_FMSUBADD(h2_real, y5, _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#else
          q5 = _SIMD_ADD(q5, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y5), _SIMD_SHUFFLE(tmp5, tmp5, _SHUFFLE)));
#endif
          tmp6 = _SIMD_MUL(h2_imag, y6);
#ifdef __ELPA_USE_FMA__
          q6 = _SIMD_ADD(q6, _SIMD_FMSUBADD(h2_real, y6, _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#else
          q6 = _SIMD_ADD(q6, _SIMD_ADDSUB( _SIMD_MUL(h2_real, y6), _SIMD_SHUFFLE(tmp6, tmp6, _SHUFFLE)));
#endif

#endif /* BLOCK2 */


         _SIMD_STORE(&q_dbl[(2*i*ldq)+0], q1);
         _SIMD_STORE(&q_dbl[(2*i*ldq)+offset], q2);
         _SIMD_STORE(&q_dbl[(2*i*ldq)+2*offset], q3);
         _SIMD_STORE(&q_dbl[(2*i*ldq)+3*offset], q4);
         _SIMD_STORE(&q_dbl[(2*i*ldq)+4*offset], q5);
         _SIMD_STORE(&q_dbl[(2*i*ldq)+5*offset], q6);
    }
#ifdef BLOCK2

#if VEC_SET == SSE_128     