Commit 55cee5b5 authored by Serhiy Mochalskyy

delete unused folders

parent ac994cc6
Pipeline #1721 skipped
subroutine solver
use icontr
use solv
use contr_su
use tri_w
use tri_p
use coil2d
use mpi_v
use sca
use time
!-----------------------------------------------------------------------
implicit none
include "mpif.h"
integer :: i,j,k
real :: test_sum=0.
!-----------------------------------------------------------------------
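! problem sizes below (interpretation assumed from their usage in this file):
!   nd_bez - boundary (Bezier) degrees of freedom, (nd_harm+nd_harm0)/2 harmonics times n_dof_bnd
!   nd_w   - wall potentials plus coil unknowns
!   nd_we  - combined wall/coil + boundary block, i.e. the number of columns of a_pwe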
write(*,*) ' SOLVER: n_dof_bnd : ',n_dof_bnd
nd_bez = (nd_harm+nd_harm0)/2 * n_dof_bnd
nd_w = ncoil + npot_w
nd_we = nd_w + nd_bez
write(*,*) ' SOLVER: nd_bez : ',nd_bez
write(*,*) ' SOLVER: nd_w : ' ,nd_w
write(*,*) ' SOLVER: nd_we : ',nd_we
write(*,*) ' SOLVER: npot_p : ',npot_p
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time1=MPI_WTIME()
!-----------------------------------------------------------------------
!!!!!call perfon('ma_pp')
!call matrix_pp
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time2=MPI_WTIME()
if(rank==0)write(110+rank,*) time2-time1
!!!!!call perfoff
!!!!!call perfon('ma_wp')
!call matrix_wp
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time3=MPI_WTIME()
if(rank==0)write(120+rank,*) time3-time2
!stop
!!!!!call perfoff
!!!!!call perfon('ma_ww')
!call matrix_ww
!!!!!call perfoff
!!!!!call perfon('ma_rw')
!call matrix_rw
!stop
!!!!!call perfoff
if (nu_coil.ne.0) then
!!!!!call perfon('coil')
call matrix_cc
call matrix_cp
call matrix_wc
call matrix_rc
!!!!!call perfoff
endif
!!!!!call perfon('ma_pe')
call matrix_pe
test_sum=0
CALL MPI_ALLREDUCE(sum(a_pwe_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(3000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
deallocate(a_pwe_loc)
call matrix_pe2
!a_pwe_loc=0
test_sum=0
CALL MPI_ALLREDUCE(sum(a_pwe_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(3100+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_pwe_loc_s), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(3200+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time4=MPI_WTIME()
if(rank==0)write(130+rank,*) time4-time3
stop
!!!!!call perfoff
!!!!!call perfon('ma_ep!')
call matrix_ep
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time5=MPI_WTIME()
if(rank==0)write(140+rank,*) time5-time4
!call MPI_BARRIER(MPI_COMM_WORLD,ier)
!stop
!!!!!call perfoff
!!!!!call perfon('ma_ew')
call matrix_ew
!!!!!call perfoff
!test_sum=0
!CALL MPI_ALLREDUCE(sum(a_ew_loc_sca), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(6000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time6=MPI_WTIME()
if(rank==0)write(150+rank,*) time6-time5
call MPI_BARRIER(MPI_COMM_WORLD,ier)
!stop
if (nu_coil.ne.0) then
!!!!!call perfon('ma_ec')
call matrix_ec
!!!!!call perfoff
endif
write(*,'(A,i5)') 'SOLVER : npot_p : ',npot_p
write(*,'(A,i5)') 'SOLVER : nd_w : ',nd_w
write(*,'(A,2i5,A,2i5)') 'SOLVER : copying a_wp ', nd_w, npot_p,' to a_pwe ',npot_w,nd_we
call a_pe_transpose_sca
call cholesky_solver
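! note: cholesky_solver is assumed to leave a_pp^(-1)*[a_pw | a_pe] in a_pwe
! (columns 1..nd_w and nd_w+1..nd_we respectively); the a_ew, a_we and a_ww
! updates further down rely on that layout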
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time7=MPI_WTIME()
if(rank==0)write(160+rank,*) time7-time6
deallocate(a_pp_loc)
!compute a_ee = a_ep*a_pp^(-1)*a_pe   (a_ee = vacuum response)
!do i=1,nd_bez
! do k=1,nd_bez
! do j=1,npot_p
! a_ee(i,k) = a_ee(i,k)+a_ep(i,j)*a_pwe(j,k+nd_w)
! enddo
! enddo
!enddo
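! equivalent dense formulation of the loop above (sketch only, assuming fully
! assembled a_ep(nd_bez,npot_p), a_pwe(npot_p,nd_we) and a_ee(nd_bez,nd_bez)):
!   call dgemm('N','N',nd_bez,nd_bez,npot_p,1.,a_ep,nd_bez,a_pwe(1,nd_w+1),npot_p,1.,a_ee,nd_bez)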
!call a_ee_computing
!test_sum=0
!CALL MPI_ALLREDUCE(sum(a_ee_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(8300+rank,*) test_sum
!call MPI_BARRIER(MPI_COMM_WORLD,ier)
!deallocate(a_ee_loc)
!call a_ee_computing2
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time8=MPI_WTIME()
if(rank==0)write(170+rank,*) time8-time7
!test_sum=0
!CALL MPI_ALLREDUCE(sum(a_ee_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(8400+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
!=====================================================================================================
!-----------------------------------------------------------------------
! compute parallel component of magnetic field
allocate(a_we(nd_w,nd_bez),a_ee(nd_bez,nd_bez))
a_we = 0.; a_ee = 0.
!compute a_ew = a_ew - a_ep*a_pp^(-1)*a_pw
!do i=1,nd_bez
! do k=1,nd_w
! do j=1,npot_p
! a_ew(i,k) = a_ew(i,k) - a_ep(i,j)*a_pwe(j,k)
! enddo
! enddo
!enddo
test_sum=0
CALL MPI_ALLREDUCE(sum(a_ew_loc_sca), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(9000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_ep_loc_sca), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(9100+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_pwe_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(9200+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
if(rank==0) write(7000+rank,*) nd_bez, nd_w, npot_p
!==================================================================
call a_ew_computing2
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time9=MPI_WTIME()
if(rank==0)write(180+rank,*) time9-time8
test_sum=0
CALL MPI_ALLREDUCE(sum(a_ew_loc_sca), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(9300+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
stop
!$OMP PARALLEL PRIVATE(i,j,k)
!$OMP DO
do i=1,nd_bez
do k=1,nd_w
do j=1,npot_p
a_ew(i,k) = a_ew(i,k) - a_ep(i,j)*a_pwe(j,k)
enddo
enddo
enddo
!$OMP END DO
! compute a_we = a_wp*a_pp^(-1)*a_pe
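! (the k+nd_w column offset picks the a_pp^(-1)*a_pe block of a_pwe, see note above)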
!$OMP DO
do i=1,nd_w
do k= 1,nd_bez
do j=1,npot_p
a_we(i,k) = a_we(i,k) +a_wp(i,j)*a_pwe(j,k+nd_w)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
!
! a_ww = a_ww - a_wp*a_pp^(-1)*a_pw
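! DGEMM argument mapping (for reference): alpha=-1 and beta=1 accumulate
! -a_wp(nd_w,npot_p) * a_pwe(npot_p,1:nd_w) into a_ww(nd_w,nd_w); the first
! nd_w columns of a_pwe, assumed to hold a_pp^(-1)*a_pw, are used here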
call dgemm('N','N',nd_w,nd_w,npot_p,-1.,a_wp,nd_w,a_pwe,npot_p,1.,a_ww,nd_w)
end subroutine solver
! ----------------------------------------------------------------------
subroutine matrix_ep
! ----------------------------------------------------------------------
! purpose: 17/08/09
!
! ----------------------------------------------------------------------
use icontr
use contr_su
use tri_p
! use tri_w
use solv
use time
use mpi_v
use sca
! ----------------------------------------------------------------------
implicit none
include "mpif.h"
! ----------------------------------------------------------------------
real,dimension(:,:),allocatable :: b_par,b_par0
real,dimension( :),allocatable :: bx,by,bz,a0_c,b0_c
real,dimension(:,:),allocatable :: a_c,a_s,b_c,b_s
real :: pi2,alv,fnv
integer :: i,i0,i1,i_start,j, k_start
integer :: k,kp,ku,kv,l,nd_e,nuv
! ----------------------------------------------------------------------
real,dimension(:,:),allocatable :: b_par_loc,b_par0_loc
real,dimension(:,:),allocatable :: a_c_loc,b_c_loc,a_s_loc
real,dimension(:,:),allocatable :: a_ep_loc
integer :: nuv_loc_b,nuv_loc_e, npot_p_loc_b, npot_p_loc_e
integer :: npot_p_loc_b_m1
real :: test_sum
write(6,*) 'compute matrix_ep '
pi2 = 4.*asin(1.)
nuv = nu*nv
fnv = 1./float(nv)
alv = pi2*fnv
nd_e = (nd_harm+nd_harm0)/2*n_dof_bnd
i_start = 1
allocate(a_ep(nd_e,npot_p),b_par(nuv,npot_p), &
a_c(nu,npot_p), a_s(nu,npot_p), &
b_c(n_dof_bnd,npot_p),b_s(n_dof_bnd,npot_p), stat=ier )
a_ep=0.; b_par = 0.; a_c=0.; a_s=0.; b_c=0.; b_s=0.
! step=nuv/NPROCS
! nuv_loc_b=(rank)*step+1
! nuv_loc_e=(rank+1)*step
! if(rank==NPROCS-1) nuv_loc_e=nuv
step=npot_p/NPROCS
npot_p_loc_b=(rank)*step+1
npot_p_loc_e=(rank+1)*step
if(rank==NPROCS-1) npot_p_loc_e=npot_p
npot_p_loc_b_m1=npot_p_loc_b
if(rank>0) npot_p_loc_b_m1=npot_p_loc_b-1
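! the columns of the potential system are distributed in contiguous blocks over
! the MPI ranks (last rank takes the remainder); the lower bound is extended by
! one on ranks > 0 because the shifted copy b_par0_loc(:,k) = -b_par_loc(:,k-1)
! further down needs column npot_p_loc_b-1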
!write(800+rank,*) npot_p_loc_b, npot_p_loc_e
allocate(b_par_loc(nuv,npot_p_loc_b_m1:npot_p_loc_e),a_c_loc(nu,npot_p_loc_b:npot_p_loc_e), &
b_c_loc(n_dof_bnd,npot_p_loc_b:npot_p_loc_e), a_ep_loc(nd_e,npot_p_loc_b:npot_p_loc_e), &
a_s_loc(nu,npot_p_loc_b:npot_p_loc_e), stat=ier)
if (ier /= 0) then
print *,'Matrix_ep.f90: Can not allocate local arrays b_par_loc', rank
stop
endif
!write(150+rank,*) npot_p_loc_b, npot_p_loc_e
!allocate(b_par_loc(nuv_loc_b:nuv_loc_e,npot_p),a_c_loc(nu,npot_p_loc_b:npot_p_loc_e),stat=ier)
! if (ier /= 0) then
! print *,'Matrix_ep.f90: Can not allocate local arrays b_par_loc', rank
! stop
! endif
b_par_loc=0.;a_c_loc=0. ;b_c_loc=0.;a_ep_loc=0.;a_s_loc=0.
!write(340+rank,*) b_c_loc
!if(rank==0) write(100+rank,*) nd_e,npot_p,nuv,nu, n_dof_bnd
!=================================================
call bfield_par(b_par,x,y,z,xu,yu,zu,nuv,xp,yp,zp,ntri_p,jpot_p,npot_p)
!=================================================
!write(200+rank,*) npot_p_loc_b, npot_p_loc_e, npot_p_loc_b_m1
call bfield_par_par(b_par_loc,x,y,z,xu,yu,zu,nuv,npot_p_loc_b, npot_p_loc_e, xp,yp,zp,ntri_p,jpot_p)
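! bfield_par_par is assumed to be the column-distributed counterpart of bfield_par,
! filling only the locally owned potential columns npot_p_loc_b..npot_p_loc_e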
if(rank==1 ) write(650+rank,*) -b_par_loc(:,320)
if(rank==2 ) write(660+rank,*) -b_par_loc(:,640)
if(rank==3 ) write(670+rank,*) -b_par_loc(:,960)
if(rank==0 ) write(750+rank,*) -b_par_loc(:,320)
if(rank==1 ) write(760+rank,*) -b_par_loc(:,640)
if(rank==2 ) write(770+rank,*) -b_par_loc(:,960)
if(rank==3 ) write(780+rank,*) -b_par_loc(:,1280)
if(rank==0 ) write(950+rank,*) -b_par(:,320)
if(rank==0 ) write(960+rank,*) -b_par(:,640)
if(rank==0 ) write(970+rank,*) -b_par(:,960)
if(rank==0 ) write(980+rank,*) -b_par(:,1280)
if (320>= npot_p_loc_b_m1 .AND. 320<=npot_p_loc_e ) write(300+rank,*) b_par_loc(:,320)
if (640>= npot_p_loc_b_m1 .AND. 640<=npot_p_loc_e ) write(310+rank,*) b_par_loc(:,640)
if(rank==0) write(3000+rank,*) sum(b_par)
test_sum=0
!CALL MPI_ALLREDUCE(sum(b_par_loc(nuv,npot_p_loc_b:npot_p_loc_e)), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
CALL MPI_ALLREDUCE(sum(b_par_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(3100+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD, ier)
!if(rank==0) write(6000+rank,*) sum(b_par)
!test_sum=0
!CALL MPI_ALLREDUCE(sum(b_par_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(6100+rank,*) test_sum
!call MPI_BARRIER(MPI_COMM_WORLD, ier)
!--------------------------------------------------------------------------
if (n_tor(1) .eq. 0) then
allocate(b_par0(nuv,npot_p),bx(nuv),by(nuv),bz(nuv),a0_c(nu),b0_c(2*N_bnd), stat=ier)
b_par0=0.; bx=0.; by=0.; bz=0. ; a0_c=0.; b0_c=0.
! if(rank>0) npot_p_loc_b=npot_p_loc_b+1
!write(700+rank,*) npot_p_loc_b,npot_p_loc_e
allocate(b_par0_loc(nuv,npot_p_loc_b:npot_p_loc_e), stat=ier)
if (ier /= 0) then
print *,'Matrix_ep.f90: Can not allocate local arrays b_par0_loc', rank
stop
endif
b_par0_loc=0.
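! column 1 of b_par0 is reserved for the field of the fixed potential phi0_p
! (computed by bfield_c below); the npot_p1 single-potential columns follow,
! shifted by one, which is why the copy loops below use index k+1 (serial) and k-1 (local)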
call bfield_c(x,y,z,bx,by,bz,nuv,xp,yp,zp,phi0_p,ntri_p)
if(1>=npot_p_loc_b) then
do i=1,nuv
b_par0_loc(i,1) = -(xu(i)*bx(i)+yu(i)*by(i)+zu(i)*bz(i))
enddo
endif
!=================================================
do i=1,nuv
b_par0(i,1) = -(xu(i)*bx(i)+yu(i)*by(i)+zu(i)*bz(i))
enddo
!=================================================
!if(rank==0) write(9000+rank,*) sum(b_par)
!test_sum=0
!CALL MPI_ALLREDUCE(sum(b_par_loc(nuv,npot_p_loc_b:npot_p_loc_e)), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(9100+rank,*) test_sum
!call MPI_BARRIER(MPI_COMM_WORLD, ier)
!if(rank==0) write(3000+rank,*) sum(b_par0)
!test_sum=0
!CALL MPI_ALLREDUCE(sum(b_par0_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(3100+rank,*) test_sum
!call MPI_BARRIER(MPI_COMM_WORLD, ier)
!=================================================
do k=1,npot_p1
do i=1,nuv
b_par0(i,k+1) = -b_par(i,k)
enddo
enddo
!=================================================
if(rank==0) npot_p_loc_b=npot_p_loc_b+1
do k=npot_p_loc_b,npot_p_loc_e
do i=1,nuv
b_par0_loc(i,k) = -b_par_loc(i,k-1)
if(rank==1 .AND. k==321) write(550+rank,*) b_par0_loc(i,k), -b_par_loc(i,k-1)
if(rank==2 .AND. k==641) write(560+rank,*) b_par0_loc(i,k), -b_par_loc(i,k-1)
if(rank==3 .AND. k==961) write(570+rank,*) b_par0_loc(i,k), -b_par_loc(i,k-1)
enddo
enddo
if(rank==0) npot_p_loc_b=npot_p_loc_b-1
if(rank==0) write(900+rank,*) b_par0(:,321)
!if(rank==0) write(900+rank,*) b_par0(:,1:320)
!if(rank==0) write(910+rank,*) b_par0(:,321:640)
!if(rank==0) write(920+rank,*) b_par0(:,641:960)
!if(rank==0) write(930+rank,*) b_par0(:,961:1280)
!if(rank==0) write(950+rank,*) b_par0_loc(:,1:320)
!if(rank==1) write(960+rank,*) b_par0_loc(:,321:640)
!if(rank==2) write(970+rank,*) b_par0_loc(:,641:960)
!if(rank==3) write(980+rank,*) b_par0_loc(:,961:1280)
!write(1100+rank,*) sum(b_par0_loc)
! do k=1,npot_p1
! do i=nuv_loc_b,nuv_loc_e
! b_par0_loc(i,k+1) = -b_par_loc(i,k)
!
!
! b_par0_loc(i,k) = -b_par_loc(i,k-1)
!
! enddo
! enddo
if(rank==0) write(2000+rank,*) sum(b_par0)
test_sum=0
CALL MPI_ALLREDUCE(sum(b_par0_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(2100+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD, ier)
!=============================================
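! toroidal average: a_c(ku,k) = (1/nv) * sum over kv of b_par0(ku+nu*(kv-1),k)
! (the nuv surface points are apparently stored with the poloidal index running fastest)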
do k=1,npot_p
do ku=1,nu
do kv =1,nv
a_c(ku,k) = a_c(ku,k) + b_par0(ku+nu*(kv-1),k)*fnv
enddo
enddo
enddo
!=============================================
do k=npot_p_loc_b,npot_p_loc_e
!do k=1,npot_p
do ku=1,nu
do kv =1,nv
a_c_loc(ku,k) = a_c_loc(ku,k) + b_par0_loc(ku+nu*(kv-1),k)*fnv
enddo
enddo
enddo
!if(rank==0) write(4000+rank,*) sum(a_c)
!test_sum=0
!CALL MPI_ALLREDUCE(sum(a_c_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(4100+rank,*) test_sum
!call MPI_BARRIER(MPI_COMM_WORLD, ier)
!write(320+rank,*) b_c_loc
! write(4000+rank,*) N_bnd,n_points,npot_p,n_bnd_nodes,bnd_node,bnd_node_index,n_dof_bnd
call real_space2bezier(b_c,a_c,N_bnd,n_points,npot_p,n_bnd_nodes,bnd_node,bnd_node_index,n_dof_bnd,Lij)
call real_space2bezier_par(b_c_loc,a_c_loc,N_bnd,n_points,npot_p,n_bnd_nodes,bnd_node,bnd_node_index,n_dof_bnd,Lij, npot_p_loc_b,npot_p_loc_e)
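! real_space2bezier_par is assumed to project only the locally owned columns
! npot_p_loc_b..npot_p_loc_e onto the Bezier boundary basis, mirroring the serial call above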
write(*,*) ' matrix_ep ntor=0 : Nd_harm, Nd_harm0 ',Nd_harm, Nd_harm0
!if(rank==0) write(5000+rank,*) sum(b_c)
!test_sum=0
!CALL MPI_ALLREDUCE(sum(b_c_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!if(rank==0) write(5100+rank,*) test_sum
!call MPI_BARRIER(MPI_COMM_WORLD, ier)
!write(300+rank,*) b_c
!write(310+rank,*) b_c_loc
!=======================================================================
do i =1,N_bnd ! to be adapted: probably needs to be n_bnd_nodes