Commit e8df7704 authored by Serhiy Mochalskyy's avatar Serhiy Mochalskyy

modified main and add computing_s_ww_inverse.f90

parent 9280def6
Pipeline #2248 skipped
subroutine computing_s_ww_inverse
use mpi_v
use sca
use resistive
!-----------------------------------------------------------------------
implicit none
include "mpif.h"
integer, allocatable :: ipiv(:)
real, dimension(:),allocatable :: WORK
integer,dimension(:),allocatable :: IWORK
integer (KIND=8) LWORK
integer :: LIWORK
if(rank==0) write(*,*) "computing_s_ww_inverse starts"
!Set up scalapack sub grid
call SCA_GRID(n_w,n_w)
!allocate local matrix
allocate(s_ww_inv_loc(MP_A,NQ_A), stat=ier)
IF (IER /= 0) THEN
   WRITE (*,*) " Cannot allocate local matrix s_ww_inv_loc: MY_PROC_NUM=",MYPNUM
   STOP
END IF
LDA_s_ww_inv=LDA_A
s_ww_inv_loc=0.
! --- Determine inverse of S_ww matrix
allocate( ipiv(n_w),stat=ier )
IF (IER /= 0) THEN
   WRITE (*,*) " Cannot allocate local array ipiv: MY_PROC_NUM=",MYPNUM
   STOP
END IF
s_ww_inv_loc(:,:) = s_ww_loc(:,:)
CALL DESCINIT(DESCA,n_w, n_w, NB, NB, 0, 0, CONTEXT, LDA_s_ww_inv, INFO_A)
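! PDGETRF below overwrites s_ww_inv_loc with its LU factorization; PDGETRI then builds the inverse from these factors.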
CALL PDGETRF( n_w , n_w , s_ww_inv_loc , 1 , 1 , DESCA , IPIV , INFO )
if ( INFO /= 0 ) then
   write(*,*) 'ERROR calling PDGETRF when computing inverse of S_ww_loc. info=', INFO
   stop
endif
allocate(WORK(1),IWORK(1), stat=ier)
IF (IER /= 0) THEN
   WRITE (*,*) " Cannot allocate local arrays WORK, IWORK: MY_PROC_NUM=",MYPNUM
   STOP
END IF
WORK=0
IWORK=0
LWORK=-1
LIWORK=-1
!Here we calculate only the workspace
CALL PDGETRI( n_w , s_ww_inv_loc , 1 , 1 ,DESCA ,IPIV, WORK , LWORK , IWORK , LIWORK , INFO )
if ( INFO /= 0 ) then
   write(*,*) 'ERROR calling PDGETRI when computing inverse of S_ww_loc. info=', INFO
   stop
endif
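! WORK(1)/IWORK(1) returned by the query hold the minimal workspace sizes;
! lwork_cooficient (defined in a used module) presumably scales them as a safety margin.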
lwork =lwork_cooficient*INT(ABS(work(1)))+1
if(rank==0) write(6,*) " Need to allocate = ", REAL(lwork*8.0/1024.0/1024.0/1024.0), "GB WORK workspace array"
DEALLOCATE(work)
ALLOCATE(work(LWORK),stat=ier)
IF (IER /= 0) THEN
   WRITE (*,*) "computing_s_ww_inverse: Cannot allocate WORK: MY_PROC_NUM=",MYPNUM, "LWORK=",LWORK
   STOP
END IF
liwork =lwork_cooficient*INT (ABS(iwork(1)))
DEALLOCATE(iwork)
ALLOCATE(iwork(liwork),stat=ier)
IF (IER /= 0) THEN
   WRITE (*,*) "computing_s_ww_inverse: Cannot allocate IWORK: MY_PROC_NUM=",MYPNUM
   STOP
END IF
if(rank==0) write(6,*) " Need to allocate = ", REAL(liwork*8.0/1024.0/1024.0/1024.0), "GB IWORK workspace array"
! Here calculation is performed
CALL PDGETRI( n_w , s_ww_inv_loc , 1 , 1 ,DESCA ,IPIV, WORK , LWORK , IWORK , LIWORK , INFO )
if ( INFO /= 0 ) then
   write(*,*) 'ERROR calling PDGETRI when computing inverse of S_ww_loc. info=', INFO
   stop
endif
deallocate( ipiv, work, iwork )
if(rank==0) write(*,*) "computing_s_ww_inverse complited"
if(rank==0) write(*,*) '==============================================================='
end subroutine computing_s_ww_inverse
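! NOTE: the local sizes MP_A, NQ_A and the leading dimension LDA_A used above come
! from the sca module via SCA_GRID. As an illustration only (the routine below and
! its interface are not part of this code), such sizes are conventionally derived
! from the BLACS grid with NUMROC as sketched here.
subroutine local_dims_sketch(context, n_w, nb, mp_a, nq_a, lda_a)
   implicit none
   integer, intent(in)  :: context, n_w, nb
   integer, intent(out) :: mp_a, nq_a, lda_a
   integer :: nprow, npcol, myrow, mycol
   integer, external :: numroc
   ! Query the process grid attached to this BLACS context.
   call blacs_gridinfo(context, nprow, npcol, myrow, mycol)
   ! Local row/column counts owned by this process for an n_w x n_w matrix in nb x nb blocks.
   mp_a = numroc(n_w, nb, myrow, 0, nprow)
   nq_a = numroc(n_w, nb, mycol, 0, npcol)
   ! The local leading dimension must be at least the local row count.
   lda_a = max(1, mp_a)
end subroutine local_dims_sketch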
subroutine get_index_dima(i,k,ku,j)
! This subroutine is used only in subroutine matrix_pp
! in order to avoid storage of matrix dima
use tri_p !ntri_p
use icontr !nv
use contr_su !nu
implicit none
integer :: i,j,k,ku,nu2,kv,c
nu2 = 2*nu
!Calculation of the first index i->ku
do c=2,nv+1
   if ((c-1)*nu2-i >= 0) then
      kv=c-1
      ku=i-(kv-1)*nu2
      exit
   endif
enddo
! Calculation of the second index k->j
if (nu2*(kv-1) < k) then
   j=k-nu2*(kv-1)
else
   j=ntri_p+k-nu2*(kv-1)
endif
end subroutine get_index_dima
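! NOTE: a self-contained illustration of the index arithmetic above. The sizes
! nu=2, nv=3, ntri_p=12 are arbitrary example values, not taken from this code.
program demo_get_index_dima
   implicit none
   integer, parameter :: nu = 2, nv = 3, ntri_p = 12   ! example values only
   integer :: i, k, ku, j, kv, c, nu2
   nu2 = 2*nu
   i = 7   ! example first index
   k = 3   ! example second index
   ! First index: find the block kv containing i, then the offset ku within it.
   do c = 2, nv+1
      if ((c-1)*nu2 - i >= 0) then
         kv = c-1
         ku = i - (kv-1)*nu2
         exit
      endif
   enddo
   ! Second index: express k relative to the same block, shifting by ntri_p
   ! when k does not fall past the start of the block.
   if (nu2*(kv-1) < k) then
      j = k - nu2*(kv-1)
   else
      j = ntri_p + k - nu2*(kv-1)
   endif
   write(*,*) 'i,k =', i, k, ' -> ku,j =', ku, j   ! prints ku = 3, j = 11
end program demo_get_index_dima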
program starwall
! ----------------------------------------------------------------------
use icontr
use mpi_v
use time
implicit none
include "mpif.h"
! --- Initialization of MPI and the ScaLAPACK process grid
call MPI_and_Scalapack_Init
! --- Read input parameters from namelist file.
call input
! --- Read the JOREK control boundary.
call control_boundary
! --- Generation of the control surface triangles
call tri_contr_surf
! --- Discretization of the wall (generate wall triangles manually)
if ((nwall .gt. 0) .and. (iwall .eq. 1)) then
   call surface_wall
end if
! --- Read wall triangles
!if(nwall.gt.0.and.iwall.eq.2) then
!   call read_wall_triangles
!end if
! --- Read external coils (not tested yet)
if (nu_coil .gt. 0) call read_coil_data
! --- Solve Laplace equation Delta Phi = 0 as a variational problem to determine
!     the unknowns (current potentials Phi on wall triangle vertices)
!     Boundary conditions: B_perp on control surface from JOREK
!                          B_perp on wall is zero (ideal conductor)
! --- Check whether the number of MPI tasks exceeds the array dimensions
call control_array_distribution
! --- Main solver
call solver
! --- Write out the response matrices
if (i_response .le. 1) then
   call ideal_wall_response
endif
if (i_response .eq. 2) then
   call resistive_wall_response
endif
call MPI_FINALIZE(ierr)
end program starwall
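! NOTE: the explicit MPI setup that MPI_and_Scalapack_Init replaces in this main
! program looked roughly like the sketch below (assembled from the previous version
! of main shown in this commit; the wrapper's actual contents may differ).
subroutine MPI_and_Scalapack_Init_sketch
   use mpi_v   ! assumed to provide rank, numtasks, rc, ierr
   implicit none
   include "mpif.h"
   call MPI_INIT(ierr)
   if (ierr .ne. MPI_SUCCESS) then
      print *,'Error starting MPI program. Terminating!!'
      call MPI_ABORT(MPI_COMM_WORLD, rc, ierr)
   endif
   call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
   call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr)
   ! Set up the ScaLAPACK/BLACS process grid.
   call MPI_GRID()
end subroutine MPI_and_Scalapack_Init_sketch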