Commit 9280def6 authored by Serhiy Mochalskyy

add new file get_index_dima; check deallocation

parent 9f4b30e5
Pipeline #2247 skipped
......@@ -3,6 +3,7 @@ include $(FILES_MK)
#FC = ifort
FC = mpiifort
#FC = mpif90
#FC = mpinagfor
#FFLAGS = -autodouble -I$(OBJ_DIR) -module $(OBJ_DIR) -openmp -g
FFLAGS = -autodouble -I$(OBJ_DIR) -module $(OBJ_DIR) -g
......
......@@ -68,6 +68,9 @@ do j=1,nd_bez
enddo
deallocate(a_pwe_arr , a_pwe_arr_tot)
if(rank==0) write(*,*) 'a_pwe_s_computing done'
if(rank==0) write(*,*) '==============================================================='
......
......@@ -50,6 +50,8 @@ endif
CALL PDGEMM('N','N', nd_w ,nd_bez, npot_p , 1.0D0 , a_wp_loc ,1,1,DESCA, a_pwe_loc_s,1,1,DESCB,&
1.0D0, a_we_loc,1 , 1 , DESCC)
deallocate(a_pwe_loc_s)
if(rank==0) write(*,*) 'a_we_computing completed'
if(rank==0) write(*,*) '==============================================================='
......
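The PDGEMM call above operates on block-cyclically distributed matrices, each described by a ScaLAPACK array descriptor (DESCA, DESCB, DESCC). The descriptor setup is not part of this hunk; the following is an editorial sketch of how such descriptors are typically created with DESCINIT and NUMROC. The block sizes and local variable names are illustrative assumptions only.
! Sketch only: descriptor setup assumed to precede the PDGEMM call above.
! The BLACS context ictxt is assumed to exist already (grid created elsewhere).
integer            :: DESCA(9), DESCB(9), DESCC(9)
integer            :: ictxt, nprow, npcol, myrow, mycol
integer            :: mb, nb, llda, lldb, lldc, info
integer, external  :: NUMROC
call BLACS_GRIDINFO(ictxt, nprow, npcol, myrow, mycol)
mb = 64; nb = 64                                      ! assumed block sizes
llda = max(1, NUMROC(nd_w,   mb, myrow, 0, nprow))    ! local rows of a_wp_loc
lldb = max(1, NUMROC(npot_p, mb, myrow, 0, nprow))    ! local rows of a_pwe_loc_s
lldc = max(1, NUMROC(nd_w,   mb, myrow, 0, nprow))    ! local rows of a_we_loc
call DESCINIT(DESCA, nd_w,   npot_p, mb, nb, 0, 0, ictxt, llda, info)   ! A: nd_w x npot_p
call DESCINIT(DESCB, npot_p, nd_bez, mb, nb, 0, 0, ictxt, lldb, info)   ! B: npot_p x nd_bez
call DESCINIT(DESCC, nd_w,   nd_bez, mb, nb, 0, 0, ictxt, lldc, info)   ! C: nd_w x nd_bez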
......@@ -57,7 +57,8 @@ F_MAIN_SRC = mod_icontr.f90\
d_ee_computing.f90\
mpi_and_scalapack_init.f90\
control_array_distribution.f90\
computing_s_ww_inverse.f90
computing_s_ww_inverse.f90\
get_index_dima.f90
F_MAIN_OBJ = $(addprefix $(OBJ_DIR)/,$(F_MAIN_SRC:.f90=.o))
MAIN_OBJ = $(F_MAIN_OBJ)
......@@ -202,30 +203,6 @@ $(OBJ_DIR)/d_ee_computing.o:\
$(OBJ_DIR)/computing_s_ww_inverse.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_sca.o\
$(OBJ_DIR)/mod_resistive.o
$(OBJ_DIR)/get_index_dima.o:\
$(OBJ_DIR)/mod_tri_p.o $(OBJ_DIR)/mod_icontr.o\
$(OBJ_DIR)/mod_contr_su.o
program starwall
! ----------------------------------------------------------------------
use icontr
use time
use mpi_v
implicit none
include "mpif.h"
integer i,MASTER
call MPI_INIT(ierr)
if (ierr .ne. MPI_SUCCESS) then
print *,'Error starting MPI program. Terminating!!'
call MPI_ABORT(MPI_COMM_WORLD, rc, ierr)
endif
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr)
call MPI_GRID()
! Initialization of MPI and Scalapack process grid
call MPI_and_Scalapack_Init
! --- Read input parameters from namelist file.
call input
! --- Read the JOREK control boundary.
call control_boundary
! --- Generation of the control surface triangles
call tri_contr_surf
! --- Discretization of the wall (generate wall triangles manually)
if ((nwall .gt. 0) .and. (iwall .eq. 1)) then
call surface_wall
end if
! --- Read wall triangles
!if(nwall.gt.0.and.iwall.eq.2) then
!call read_wall_triangles
!end if
! --- Read external coils (not tested yet)
if (nu_coil .gt. 0) call read_coil_data
! --- Solve Laplace equation Delta Phi = 0 as a variational problem to determine
!     the unknowns (current potentials Phi on wall triangle vertices)
!     Boundary conditions: B_perp on control surface from JOREK
!                          B_perp on wall is zero (ideal conductor)
! Check if the number of MPI tasks is larger than the array dimensions
call control_array_distribution
! Main solver
call solver
! --- Write out the response matrices
if (i_response .le. 1) then
call ideal_wall_response
endif
if (i_response .eq. 2) then
call resistive_wall_response
endif
call MPI_FINALIZE(ier)
call MPI_FINALIZE(ierr)
end program starwall
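The body of MPI_and_Scalapack_Init is not shown in this diff. Judging from the explicit MPI_INIT ... MPI_GRID() sequence that appears alongside it above, it presumably bundles the MPI start-up and the creation of the BLACS/ScaLAPACK process grid. An editorial sketch of such a routine follows; the routine name, local variables, and the grid-selection heuristic are assumptions.
subroutine MPI_and_Scalapack_Init_sketch        ! sketch only, not the actual routine
implicit none
include "mpif.h"
integer :: ierr, rank, numtasks
integer :: ictxt, nprow, npcol, myrow, mycol
call MPI_INIT(ierr)
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr)
! Choose a process grid as close to square as possible.
do nprow = int(sqrt(real(numtasks))), 1, -1
if (mod(numtasks, nprow) == 0) exit
enddo
npcol = numtasks/nprow
call BLACS_GET(-1, 0, ictxt)                    ! default system context
call BLACS_GRIDINIT(ictxt, 'Row-major', nprow, npcol)
call BLACS_GRIDINFO(ictxt, nprow, npcol, myrow, mycol)
end subroutine MPI_and_Scalapack_Init_sketch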
......@@ -34,7 +34,6 @@ integer, dimension(:), allocatable :: displ
integer :: npot_p_loc_b_m1, npot_p_loc_b_copy
real ::test_sum
integer BLACS_PNUM
EXTERNAL BLACS_PNUM
......@@ -164,8 +163,6 @@ if (n_tor(1) .eq. 0) then
if(ip_proc_send>NPROCS-1) ip_proc_send=NPROCS-1
if( MYPNUM==ip_proc_send ) then
!a_ep_arr (k) = b_c_loc(2*i-1, k)
!a_ep_arr2(k) = b_c_loc(2*i, k)
a_ep_arr_s (k) = b_c_loc(2*i-1, k)
a_ep_arr_s2(k) = b_c_loc(2*i, k)
......@@ -174,9 +171,6 @@ if (n_tor(1) .eq. 0) then
enddo
!call MPI_ALLREDUCE(a_ep_arr, a_ep_arr_tot ,npot_p,MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
!call MPI_ALLREDUCE(a_ep_arr2,a_ep_arr_tot2,npot_p,MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
call MPI_ALLGATHERV(a_ep_arr_s, npot_p_loc_e-npot_p_loc_b+1,MPI_DOUBLE_PRECISION, &
a_ep_arr_tot, counts(1:NPROCS) ,displ(1:NPROCS), &
......@@ -318,9 +312,11 @@ do k=1, n_bnd_nodes ! loop over nodes NOT elements
enddo ! ! toroidal harmonics
deallocate(b_s1_loc, b_s2_loc, a_s1_loc,a_s2_loc, b_par_loc)
deallocate(a_ep_arr_tot, a_ep_arr_tot2,a_ep_arr_tot3,a_ep_arr_tot4)
deallocate(a_ep_arr_s, a_ep_arr_s2, a_ep_arr_s3,a_ep_arr_s4)
deallocate(counts,displ)
! ----------------------------------------------------------------------
......
......@@ -112,6 +112,11 @@ allocate(b_par_loc(nuv,npot_w_loc_b_m1 : npot_w_loc_e), &
!We need all this for MPI_ALLGATHERV subroutine
allocate(counts(NPROCS), displ(NPROCS),stat=ier)
if (ier /= 0) then
print *,'Matrix_ew.f90: Can not allocate local arrays counts and displ', rank
stop
endif
counts=step
counts(NPROCS)=npot_w+ncoil-step*(NPROCS-1)
displ(1) = 0
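The hunk above fills counts with equal chunks of size step and lets the last rank take the remainder; the displacements after displ(1) = 0 are cut off by the diff. Below is an editorial sketch of the usual continuation and of how counts/displ feed MPI_ALLGATHERV, mirroring the call shown in the matrix_ep hunk; the send count and the array names used here are assumptions.
! Sketch only: cumulative displacements built from counts(:).
do i = 2, NPROCS
displ(i) = displ(i-1) + counts(i-1)
enddo
! Each rank contributes its local slice; every rank receives the full array.
call MPI_ALLGATHERV(a_ew_arr_s, counts(rank+1), MPI_DOUBLE_PRECISION, &
                    a_ew_arr_tot, counts(1:NPROCS), displ(1:NPROCS),  &
                    MPI_DOUBLE_PRECISION, MPI_COMM_WORLD, ier)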
......@@ -311,6 +316,7 @@ enddo ! toroidal harmonics
deallocate(b_s1_loc, b_s2_loc, a_s1_loc,a_s2_loc, b_par_loc)
deallocate(a_ew_arr_tot, a_ew_arr_tot2,a_ew_arr_tot3,a_ew_arr_tot4)
deallocate(a_ew_arr_s, a_ew_arr_s2, a_ew_arr_s3,a_ew_arr_s4)
deallocate(counts,displ)
! ----------------------------------------------------------------------
if(rank==0) write(6,*) 'matrix_ew done'
......
......@@ -35,8 +35,7 @@ if (rank==0) write(6,*) 'compute matrix_pp npot_p=',npot_p
allocate(dxp(ntri_p,3), dyp(ntri_p,3), dzp(ntri_p,3), &
ipot_p(ntri_p,3), &
jx(ntri_p), jy(ntri_p), jz(ntri_p), &
ipot_p(ntri_p,3),jx(ntri_p), jy(ntri_p), jz(ntri_p), &
jdx(ntri_p), jdy(ntri_p), jdz(ntri_p), &
jdx2(ntri_p), jdy2(ntri_p), jdz2(ntri_p), &
x1(2*nu,3), y1(2*nu,3), z1(2*nu,3), stat=ier)
......@@ -141,9 +140,6 @@ enddo
jdy=jdy2
jdz=jdz2
deallocate(jdx2,jdy2,jdz2)
do i=1,ntri_p
do k=1,3
if (jpot_p(i,k).eq.npot_p) then
......@@ -155,8 +151,6 @@ do i=1,ntri_p
enddo
enddo
dima_sca3=0.0
do i =1,ntri_p
do i1=1,ntri_p
......@@ -247,40 +241,14 @@ do i=1,ntri_p
endif
endif
!deallocating all temporary arrays
deallocate(nx,ny,nz,tx1,ty1,tz1,tx2,ty2, &
tz2,tx3,ty3,tz3,d221,d232,d213,area,xm,ym,zm)
!deallocating all temporary arrays
deallocate(zm,ym,xm,area,d213,d232,d221,tz3,ty3,tx3,tz2,ty2,tx2, &
tz1,ty1,tx1,nz,ny,nx)
deallocate(z1,y1,x1,jdz2,jdy2,jdx2,jdz,jdy,jdx,jz,jy,jx,ipot_p,dzp,dyp,dxp)
if(rank==0) write(*,*) 'matrix pp done'
if(rank==0) write(*,*) '==============================================================='
end subroutine matrix_pp
......
subroutine get_index_dima(i,k,ku,j)
use tri_p !ntri_p
use icontr !nv
use contr_su !nu
implicit none
integer :: i,j,k,ku,nu2,kv,c
nu2 = 2*nu
!Calculation of the first index i->ku
do c=2,nv+1
if(((c-1)*nu2-i )>=0) then
kv=c-1
ku=i-(kv-1)*nu2
exit
endif
enddo
! Calculation of the second index k->j
if(nu2*(kv-1)<k) then
j=k-nu2*(kv-1)
else
j=ntri_p+k-nu2*(kv-1)
endif
end subroutine get_index_dima
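A brief editorial usage sketch for the new routine: it maps a pair of global plasma-triangle indices (i,k) to a reduced pair (ku,j) with ku in 1..2*nu, apparently so that matrix entries which repeat periodically in the toroidal direction can be looked up instead of recomputed. The loop bounds and the commented look-up array are assumptions.
integer :: i, k, ku, j
do i = 1, ntri_p
do k = 1, ntri_p
call get_index_dima(i, k, ku, j)
! dima_sca3(i,k) = dima_row(ku,j)   ! hypothetical reuse of a precomputed row
enddo
enddo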
......@@ -160,9 +160,10 @@ subroutine matrix_rw
enddo
enddo
deallocate(ipot_w,jz,jy,jx,area1,dzw,dyw,dxw)
if(rank==0) write(*,*) 'matrix_rw done'
if(rank==0) write(*,*) '==============================================================='
if(rank==0) write(*,*) '==============================================================='
end subroutine matrix_rw
......@@ -317,6 +317,7 @@ do i=1,ntri_w
enddo
deallocate(ipot_w,ipot_p,jz_p,jy_p,jx_p,jz,jy,jx,jwz,jwy,jwx, &
jpz,jpy,jpx,dzw,dyw,dxw,dzp,dyp,dxp )
......
......@@ -134,18 +134,14 @@ subroutine matrix_ww
do i1=1,ntri_w
do k =1,3
j = ipot_w(i,k) + 1
call ScaLAPACK_mapping_i2(j,i_loc,inside_i)
if (inside_i /= .true.) then! If not inside
! With the ScaLAPACK mapping the computation time is about 2-3 times higher than
! in the previous version with intervals. This is because in the ScaLAPACK version
! the subroutine tri_induct_3_2 is called more times.
call ScaLAPACK_mapping_i(j,i_loc,inside_i)
if (inside_i == .true.) then ! If inside
do k1=1,3
j1 = ipot_w(i1,k1) + 1
call ScaLAPACK_mapping_j(j1,j_loc,inside_j)
if (inside_j /= .true.) then! If not inside
else
if (inside_j == .true.) then ! If inside
dima_sca=0
dima_sca2=0
call tri_induct_3_2(ntri_w,ntri_w,i,i1,xw,yw,zw,dima_sca)
......
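ScaLAPACK_mapping_i and ScaLAPACK_mapping_j themselves are not part of this diff. They presumably test whether the calling process owns global row j (column j1) of the block-cyclically distributed matrix and, if so, return the local index. Below is an editorial sketch of the standard formulas; the block size and the explicit grid arguments are assumptions (in the real code they likely come from mod_sca).
subroutine ScaLAPACK_mapping_i_sketch(j_glob, myrow, nprow, mb, i_loc, inside_i)
! Sketch only: standard 2D block-cyclic ownership test and global-to-local
! index conversion for the row dimension; the column routine is analogous.
implicit none
integer, intent(in)  :: j_glob            ! global row index (1-based)
integer, intent(in)  :: myrow, nprow, mb  ! grid row, number of grid rows, row block size
integer, intent(out) :: i_loc             ! local row index if owned
logical, intent(out) :: inside_i          ! .true. if this process owns the row
inside_i = ( mod((j_glob-1)/mb, nprow) == myrow )
i_loc    = ((j_glob-1)/(mb*nprow))*mb + mod(j_glob-1, mb) + 1
end subroutine ScaLAPACK_mapping_i_sketch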
......@@ -55,6 +55,7 @@ if(rank==0) write(*,*) ' resistive_wall_response nd_bez :',nd_bez
!======================================================
call simil_trafo(a_ww_loc,a_rw_loc,n_w,gamma,S_ww_loc)
!======================================================
deallocate(a_ww_loc,a_rw_loc)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time15=MPI_WTIME()
......
......@@ -77,13 +77,6 @@
!this setting yields the most orthogonal eigenvectors.
ABSTOL =PDLAMCH(CONTEXT, 'U')
!ORFAC specifies the precision to which eigenvectors should be reorthogonalized.
!If INFO_SCA=2 is returned, adjusting this parameter can give correct results.
!ORFAC = 0.0001
!ORFAC =0
!If ORFAC=0, no reorthogonalization is done.
!ORFAC = 0.00000001
!Here we only calculate the workspace sizes for PDSYGVX (by setting LWORK and LIWORK to -1)
CALL PDSYGVX( IBTYPE, 'V', 'A', 'U', nw, a_ww_loc, 1, 1, DESCA, a_rw_loc, 1, 1,&
DESCB, VL, VU, IL, IU, ABSTOL, M_out, NZ_out, gamma,&
......
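The comment above refers to the standard two-call workspace-query idiom: PDSYGVX is first called with LWORK = LIWORK = -1, the optimal sizes are read back from WORK(1) and IWORK(1), the work arrays are (re)allocated, and the solver is called again. An editorial sketch follows; the eigenvector arguments (Z_loc, DESCZ) and the work/output array names after gamma are assumptions, since the full argument list is truncated in this hunk.
! Sketch only: workspace query followed by the actual solve.
allocate(WORK(1), IWORK(1))
LWORK = -1;  LIWORK = -1
CALL PDSYGVX( IBTYPE, 'V', 'A', 'U', nw, a_ww_loc, 1, 1, DESCA, a_rw_loc, 1, 1,&
              DESCB, VL, VU, IL, IU, ABSTOL, M_out, NZ_out, gamma, ORFAC,      &
              Z_loc, 1, 1, DESCZ, WORK, LWORK, IWORK, LIWORK,                  &
              IFAIL, ICLUSTR, GAP, INFO_SCA )          ! sizes only
LWORK  = int(WORK(1));  LIWORK = IWORK(1)
deallocate(WORK, IWORK);  allocate(WORK(LWORK), IWORK(LIWORK))
CALL PDSYGVX( IBTYPE, 'V', 'A', 'U', nw, a_ww_loc, 1, 1, DESCA, a_rw_loc, 1, 1,&
              DESCB, VL, VU, IL, IU, ABSTOL, M_out, NZ_out, gamma, ORFAC,      &
              Z_loc, 1, 1, DESCZ, WORK, LWORK, IWORK, LIWORK,                  &
              IFAIL, ICLUSTR, GAP, INFO_SCA )          ! actual eigensolve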
......@@ -206,6 +206,8 @@ call MPI_BARRIER(MPI_COMM_WORLD,ier)
!==================================================================
call matrix_multiplication
!==================================================================
deallocate(a_wp_loc,a_pwe_loc,a_ep_loc_sca)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
......
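The commit message mentions checking deallocation, and matrix_ew already checks its allocate status via stat=. A small editorial sketch of the analogous check for the deallocate added above; the error text is illustrative only.
! Sketch only: status-checked form of the deallocate above.
deallocate(a_wp_loc, a_pwe_loc, a_ep_loc_sca, stat=ier)
if (ier /= 0) then
print *,'deallocation of a_wp_loc/a_pwe_loc/a_ep_loc_sca failed on rank', rank
endif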
This diff is collapsed.