Commit 68fc15c5 authored by Serhiy Mochalskyy

new version of a_pwe_s_computing

parent 44c809f5
Pipeline #1848 skipped
@@ -11,13 +11,23 @@ include "mpif.h"
integer :: i,j,k
integer INDXG2P, BLACS_PNUM
EXTERNAL INDXG2P, BLACS_PNUM
integer BLACS_PNUM
EXTERNAL BLACS_PNUM
INTEGER STATUS(MPI_STATUS_SIZE)
integer :: ipr_rec=0,ipc_rec=0,ip_proc_rec=0,ipr_send=0, ipc_send=0
integer :: ip_proc_send=0,i_loc=0,j_loc=0, i_loc2=0,j_loc2=0
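! Work vectors of length npot_p: a_pwe_arr collects the locally owned entries of one
! column of a_pwe_loc, and a_pwe_arr_tot receives the globally summed (complete) column.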
real, dimension(:), allocatable :: a_pwe_arr, a_pwe_arr_tot
allocate(a_pwe_arr(npot_p), a_pwe_arr_tot(npot_p),stat=ier)
IF (IER /= 0) THEN
WRITE (*,*) "a_pwe_s_computing: cannot allocate work arrays a_pwe_arr, a_pwe_arr_tot: MYPNUM=",&
MYPNUM, " npot_p=", npot_p
STOP
END IF
a_pwe_arr=0. ; a_pwe_arr_tot=0.
if(rank==0) write(*,*) 'a_pwe_s_computing begins'
!Set up scalapack sub grid
@@ -33,31 +43,29 @@ if(rank==0) write(*,*) 'a_pwe_s_computing begins'
a_pwe_loc_s=0.
do j=1,nd_bez
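! First pass: locate the source column j+nd_w in the process grid and gather the
! rows owned by this process into the work vector a_pwe_arr.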
call ScaLAPACK_mapping_j_2(j+nd_w,j_loc,ipc_send)
do i=1,npot_p
call ScaLAPACK_mapping_i_2(i,i_loc,ipr_send)
call ScaLAPACK_mapping_j_2(j+nd_w,j_loc,ipc_send)
ip_proc_send = BLACS_PNUM(CONTEXT, ipr_send, ipc_send)
if( MYPNUM==ip_proc_send) a_pwe_arr(i) = a_pwe_loc(i_loc,j_loc)
enddo
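! Summing the per-rank partial vectors leaves the complete column on every process.
! Note: a_pwe_arr is declared real but reduced with MPI_DOUBLE_PRECISION; this assumes
! default reals are promoted to 8 bytes in the build (e.g. via an -r8 style flag).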
CALL MPI_ALLREDUCE(a_pwe_arr, a_pwe_arr_tot, npot_p, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
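! Second pass: map the destination column j and scatter the assembled column into
! a_pwe_loc_s, each process writing only the rows it owns.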
call ScaLAPACK_mapping_j_2(j,j_loc2,ipc_rec)
do i=1,npot_p
call ScaLAPACK_mapping_i_2(i,i_loc2,ipr_rec)
call ScaLAPACK_mapping_j_2(j,j_loc2,ipc_rec)
ip_proc_rec = BLACS_PNUM(CONTEXT, ipr_rec, ipc_rec)
if( MYPNUM==ip_proc_rec) a_pwe_loc_s(i_loc2,j_loc2) = a_pwe_arr_tot(i)
enddo
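! Reset the work vectors before processing the next column.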
a_pwe_arr=0.; a_pwe_arr_tot=0.
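! (The element-wise MPI_SEND/MPI_RECV exchange below is the point-to-point scheme that
! this commit replaces with the ALLREDUCE-based redistribution above.)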
if (ip_proc_rec==ip_proc_send .AND. MYPNUM==ip_proc_rec) then
a_pwe_loc_s(i_loc2,j_loc2)=a_pwe_loc(i_loc,j_loc)
else
if( MYPNUM==ip_proc_rec) then
call MPI_RECV(a_pwe_loc_s(i_loc2,j_loc2) , 1, MPI_DOUBLE_PRECISION, ip_proc_send, &
67, MPI_COMM_WORLD, status, ier )
endif
if( MYPNUM==ip_proc_send) then
call MPI_SEND( a_pwe_loc(i_loc,j_loc), 1, MPI_DOUBLE_PRECISION, ip_proc_rec, &
67, MPI_COMM_WORLD, ier)
endif
endif
enddo
enddo
if(rank==0) write(*,*) 'a_pwe_s_computing done'
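For context, a minimal, self-contained sketch of the per-column pattern the new loop relies on: each rank writes the entries it owns into a zeroed work vector, then an MPI_ALLREDUCE sum leaves the full column on every rank. The program name, the size npot, and the cyclic ownership rule are illustrative placeholders, not taken from the project, which uses the ScaLAPACK/BLACS block-cyclic mapping instead.

! column_allreduce_sketch.f90 -- illustrative only; build with an MPI Fortran wrapper
program column_allreduce_sketch
  use mpi
  implicit none
  integer, parameter :: npot = 8            ! placeholder standing in for npot_p
  integer :: rank, nproc, ier, i
  double precision :: col(npot), col_tot(npot)

  call MPI_INIT(ier)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ier)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ier)

  ! Each rank fills only the entries it "owns" (a simple cyclic rule stands in for
  ! the ScaLAPACK row mapping); all other entries stay zero.
  col = 0.d0
  do i = 1, npot
     if (mod(i-1, nproc) == rank) col(i) = dble(i)
  end do

  ! Summing the partial vectors gives every rank the complete column, which it can
  ! then copy into whichever rows of the target matrix it owns.
  call MPI_ALLREDUCE(col, col_tot, npot, MPI_DOUBLE_PRECISION, MPI_SUM, &
                     MPI_COMM_WORLD, ier)

  if (rank == 0) print *, 'assembled column:', col_tot
  call MPI_FINALIZE(ier)
end program column_allreduce_sketch

Compared with the element-wise MPI_SEND/MPI_RECV of the old version, this trades many small point-to-point messages per column for a single collective, which generally scales better at large process counts.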
@@ -161,6 +161,12 @@ if(rank==0)write(180+rank,*) time9-time8
call a_pwe_s_computing
!==================================================================
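! Diagnostic added with this commit: the global sum of a_pwe_loc_s is reduced across
! all ranks and written by rank 0, apparently as a quick consistency check.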
test_sum=0
CALL MPI_ALLREDUCE(sum(a_pwe_loc_s), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(10000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time10=MPI_WTIME()