Commit 602f94d1 authored by Serhiy Mochalskyy

add output

parent 4e75df8b
Pipeline #2789 skipped
# config.in file for the LINUX CLUSTER in Garching
include $(FILES_MK)
#FC = ifort
FC = mpiifort
#FC = mpif90
#FC = mpinagfor
#FFLAGS = -autodouble -I$(OBJ_DIR) -module $(OBJ_DIR) -openmp -g
FFLAGS = -autodouble -I$(OBJ_DIR) -module $(OBJ_DIR) -g
ifeq ($(DEBUG),yes)
FFLAGS += -O0 -warn all,nounused -check all,noarg_temp_created -debug all -debug-parameters -fstack-security-check -ftrapuv -traceback
else
# FFLAGS +=-r8 -O0 -check all -fbounds-check -fpe0 -fpe-all=0 -fno-stack-protector-all -finit-real=nan -g -traceback -fp-stack-check -save-temps,nodec,interfaces -gen_interfaces
# FFLAGS += -O0 -fp-model precise
# FFLAGS += -fpe3 -g -traceback
# FFLAGS += -O2 -qopt-report=5 -qopt-report-phase=vec
FFLAGS += -O3
# FFLAGS += -O0 -warn all,nounused -check all,noarg_temp_created -debug all -debug-parameters -fstack-security-check -ftrapuv -traceback
endif
FPPFLAGS =
@@ -58,7 +58,15 @@ F_MAIN_SRC = mod_icontr.f90\
mpi_and_scalapack_init.f90\
control_array_distribution.f90\
computing_s_ww_inverse.f90\
get_index_dima.f90
get_index_dima.f90\
output.f90\
print_starwall_response.f90\
print_starwall_d_yy.f90\
print_starwall_m_ye.f90\
print_starwall_m_ey.f90\
print_starwall_m_ee.f90\
print_starwall_files_vtk.f90\
print_starwall_d_ee.f90
F_MAIN_OBJ = $(addprefix $(OBJ_DIR)/,$(F_MAIN_SRC:.f90=.o))
MAIN_OBJ = $(F_MAIN_OBJ)
@@ -75,7 +83,7 @@ $(OBJ_DIR)/.o: \
$(OBJ_DIR)/input.o: \
$(OBJ_DIR)/mod_icontr.o $(OBJ_DIR)/mod_contr_su.o\
$(OBJ_DIR)/mod_coil2d.o $(OBJ_DIR)/mod_sca.o\
$(OBJ_DIR)/mod_mpi_v.o
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_resistive.o
$(OBJ_DIR)/control_boundary.o: \
$(OBJ_DIR)/mod_icontr.o $(OBJ_DIR)/mod_contr_su.o\
$(OBJ_DIR)/mod_gauss.o $(OBJ_DIR)/mod_mpi_v.o
@@ -206,3 +214,32 @@ $(OBJ_DIR)/computing_s_ww_inverse.o:\
$(OBJ_DIR)/get_index_dima.o:\
$(OBJ_DIR)/mod_tri_p.o $(OBJ_DIR)/mod_icontr.o\
$(OBJ_DIR)/mod_contr_su.o
$(OBJ_DIR)/print_starwall_response.o: \
$(OBJ_DIR)/mod_solv.o $(OBJ_DIR)/mod_coil2d.o\
$(OBJ_DIR)/mod_tri_w.o $(OBJ_DIR)/mod_icontr.o\
$(OBJ_DIR)/mod_contr_su.o $(OBJ_DIR)/mod_time.o\
$(OBJ_DIR)/mod_tri_p.o $(OBJ_DIR)/mod_sca.o\
$(OBJ_DIR)/mod_resistive.o
$(OBJ_DIR)/mpi_and_scalapack_init.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_time.o
$(OBJ_DIR)/output.o:\
$(OBJ_DIR)/mod_mpi_v.o
$(OBJ_DIR)/print_starwall_d_yy.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_resistive.o
$(OBJ_DIR)/print_starwall_m_ye.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_resistive.o\
$(OBJ_DIR)/mod_solv.o $(OBJ_DIR)/mod_sca.o
$(OBJ_DIR)/print_starwall_m_ey.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_resistive.o\
$(OBJ_DIR)/mod_solv.o $(OBJ_DIR)/mod_sca.o
$(OBJ_DIR)/print_starwall_m_ee.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_resistive.o\
$(OBJ_DIR)/mod_solv.o $(OBJ_DIR)/mod_sca.o
$(OBJ_DIR)/print_starwall_files_vtk.o: \
$(OBJ_DIR)/mod_coil2d.o $(OBJ_DIR)/mod_tri_w.o\
$(OBJ_DIR)/mod_icontr.o $(OBJ_DIR)/mod_contr_su.o\
$(OBJ_DIR)/mod_tri_p.o
$(OBJ_DIR)/print_starwall_d_ee.o:\
$(OBJ_DIR)/mod_mpi_v.o $(OBJ_DIR)/mod_resistive.o\
$(OBJ_DIR)/mod_solv.o $(OBJ_DIR)/mod_sca.o
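For orientation: judging from the file list and the dependency rules above, the new output.f90 is a thin driver around the print_starwall_* routines. A minimal sketch of such a dispatcher, assuming it simply calls the individual print routines in turn (the actual control flow in the commit may differ):

subroutine output
  ! Sketch only -- the committed routine may differ in detail.
  use mpi_v     ! rank etc., per the Makefile dependency above
  implicit none
  ! Each print routine is assumed to handle its own rank/format logic;
  ! several of them also depend on mod_sca (ScaLAPACK helpers).
  call print_starwall_response
  call print_starwall_d_yy
  call print_starwall_m_ye
  call print_starwall_m_ey
  call print_starwall_m_ee
  call print_starwall_d_ee
  call print_starwall_files_vtk
end subroutine output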
@@ -6,6 +6,8 @@ subroutine input
use sca
use mpi_v
use resistive
implicit none
include "mpif.h"
integer :: i
@@ -13,6 +15,7 @@ subroutine input
! Namelist with input parameters
namelist / params / i_response, n_harm, n_tor, nv, delta, n_points, nwall, iwall, nu_coil
namelist / params_scalapack / NB, ORFAC, lwork_cooficient
namelist / params_output / format_type
! --- Output code information
if(rank==0) write(outp,*) '-------------------------------------------'
if(rank==0) write(outp,*) 'STARWALL-JOREK vacuum response code'
@@ -56,19 +59,20 @@ subroutine input
!==========================================================================
! --- Log input parameters
if(rank==0) write(outp,'(A,I4)')
if(rank==0) write(outp,*) 'Input parameters:'
if(rank==0) write(outp,'(A,I4)') ' i_response =', i_response
if(rank==0) write(outp,'(A,I4)') ' n_harm =', n_harm
if(rank==0) write(outp,'(A,20I4)') ' n_tor =', n_tor(1:n_harm)
if(rank==0) write(outp,'(A,I4)') ' nv =', nv
if(rank==0) write(outp,'(A,ES10.3)') ' delta =', delta
if(rank==0) write(outp,'(A,I4)') ' n_points =', n_points
if(rank==0) write(outp,'(A,I4)') ' nwall =', nwall
if(rank==0) write(outp,'(A,I4)') ' iwall =', iwall
if(rank==0) write(outp,'(A,I4)') ' nu_coil =', nu_coil
if(rank==0) write(outp,'(A,I4)')
if(rank==0) then
write(outp,'(A,I4)')
write(outp,*) 'Input parameters:'
write(outp,'(A,I4)') ' i_response =', i_response
write(outp,'(A,I4)') ' n_harm =', n_harm
write(outp,'(A,20I4)') ' n_tor =', n_tor(1:n_harm)
write(outp,'(A,I4)') ' nv =', nv
write(outp,'(A,ES10.3)') ' delta =', delta
write(outp,'(A,I4)') ' n_points =', n_points
write(outp,'(A,I4)') ' nwall =', nwall
write(outp,'(A,I4)') ' iwall =', iwall
write(outp,'(A,I4)') ' nu_coil =', nu_coil
write(outp,'(A,I4)')
endif
! Only one task reads the data from the input file and afterwards sends it to all tasks
if(rank==0) read(inp, params_scalapack) ! Read namelist from STDIN
@@ -77,11 +81,21 @@ subroutine input
call MPI_BCAST(ORFAC, 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ier)
call MPI_BCAST(lwork_cooficient, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ier)
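Note that, unlike NB, ORFAC and lwork_cooficient above, the format_type string read further below is deliberately not broadcast, since only rank 0 writes the output files. If it were ever needed on all ranks, a character variable can be broadcast as in this sketch (not part of the commit):

! Sketch: broadcast a character(len=64) string from rank 0 to all ranks.
call MPI_BCAST(format_type, len(format_type), MPI_CHARACTER, 0, &
               MPI_COMM_WORLD, ier)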
!===================================================================================
if(rank==0) then
write(outp,*) ' ScaLAPACK Input parameters:'
write(outp,'(A,I4)') ' NB =', NB
write(outp,'(A,ES10.3)') ' ORFAC =', ORFAC
write(outp,'(A,I4)')
endif
if(rank==0) write(outp,*) ' ScaLAPACK Input parameters:'
if(rank==0) write(outp,'(A,I4)') ' NB =', NB
if(rank==0) write(outp,'(A,ES10.3)') ' ORFAC =', ORFAC
if(rank==0) write(outp,'(A,I4)')
! Only one task reads the file type, because only that task will afterwards write data to the file.
if(rank==0) read(inp, params_output) ! Read namelist from STDIN
if(rank==0) then
write(outp,'(A,I4)')
write(outp,*) 'Output starwall-response.dat:'
write(outp,'(A,A)') ' file format = ',format_type
endif
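For reference, the namelist groups are read in sequence from the same input unit, so the input file must provide them in the same order. A minimal params_output group might look like this (the value is only an illustration; the accepted strings depend on how format_type is interpreted by the print routines):

&params_output
  format_type = 'formatted'
/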
! --- Check input parameters
if ( ( i_response < 0 ) .or. ( i_response > 2 ) ) then
@@ -110,6 +124,12 @@ subroutine input
write(outp,*) 'ERROR: lwork_cooficient must have a value > 1'
stop 1
end if
if ( nu_coil > 0 ) then
write(outp,*) 'ERROR: nu_coil is not supported in this code version. &
              &The value must be 0.'
stop 1
end if
@@ -5,5 +5,9 @@ module resistive
real,dimension( :),allocatable :: gamma
integer :: n_w
integer :: n_tor_jorek, i_tor_jorek(999)
real, allocatable :: xyzpot_w(:,:)
character(len=64) :: format_type
character(len=512) :: char512
end module resistive
@@ -2,6 +2,7 @@
! ----------------------------------------------------------------------
use mpi_v
use time
implicit none
include "mpif.h"
@@ -11,10 +12,13 @@
print *,'Error starting MPI program. Terminating!!'
call MPI_ABORT(MPI_COMM_WORLD, ERRORCODE, ier)
endif
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ier)
call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ier)
! In order to measure total wall-clock time
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time1=MPI_WTIME()
! Distribute MPI tasks on the ScaLAPACK process grid
call MPI_GRID()
@@ -15,19 +15,10 @@ use resistive
implicit none
include "mpif.h"
real, dimension(:,:),allocatable :: a_ye,a_ey,d_ee,S_ww
integer :: i,j,k,lwork,i_loc,j_loc
integer :: n_tor_jorek, i_tor_jorek(999)
integer, allocatable :: ipiv(:)
real, allocatable :: s_ww_inv(:,:), xyzpot_w(:,:), work(:)
character(len=64) :: format_type
character(len=512) :: char512
integer :: i,j,k,i_loc,j_loc
real :: test_sum=0.
!-----------------------------------------------------------------------
if(rank==0) write(*,*) 'resistive_wall_response starts'
@@ -57,59 +48,18 @@ call simil_trafo(a_ww_loc,a_rw_loc,n_w,gamma,S_ww_loc)
!======================================================
deallocate(a_ww_loc,a_rw_loc)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time15=MPI_WTIME()
if(rank==0)write(240+rank,*) time15-time14
call MPI_BARRIER(MPI_COMM_WORLD,ier)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
if(rank==0) write(80000+rank,*) sum(gamma)
!if(rank==0) write(81000+rank,*) gamma
call MPI_BARRIER(MPI_COMM_WORLD,ier)
!=================================================
call a_ye_computing
!=================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time16=MPI_WTIME()
if(rank==0)write(250+rank,*) time16-time15
call MPI_BARRIER(MPI_COMM_WORLD,ier)
!=================================================
call a_ey_computing
!=================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time17=MPI_WTIME()
if(rank==0)write(260+rank,*) time17-time16
call MPI_BARRIER(MPI_COMM_WORLD,ier)
!=================================================
call d_ee_computing
!=================================================
!call MPI_BARRIER(MPI_COMM_WORLD,ier)
time17=MPI_WTIME()
if(rank==0)write(270+rank,*) time17-time16
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(d_ee_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(50000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
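The ALLREDUCE-plus-write pattern above, repeated many times in this file, computes a global checksum of a distributed matrix for debugging. A sketch of how it could be factored into one helper (hypothetical, not in the commit):

subroutine debug_checksum(local_part, unit_offset)
  ! Sketch: global checksum of a distributed array, printed by rank 0.
  use mpi_v                                ! provides rank, ier
  implicit none
  include "mpif.h"
  real, intent(in)    :: local_part(:,:)   ! local block of the matrix
  integer, intent(in) :: unit_offset       ! e.g. 50000, as above
  real :: local_sum, global_sum
  local_sum = sum(local_part)
  ! With -autodouble (see config.in), default real is double precision.
  call MPI_ALLREDUCE(local_sum, global_sum, 1, MPI_DOUBLE_PRECISION, &
                     MPI_SUM, MPI_COMM_WORLD, ier)
  if (rank == 0) write(unit_offset + rank, *) global_sum
end subroutine debug_checksum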
n_tor_jorek = 0
do i = 1, n_harm
@@ -127,41 +77,18 @@ end do
call computing_s_ww_inverse
!=================================================
time18=MPI_WTIME()
if(rank==0)write(280+rank,*) time18-time17
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(s_ww_inv_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(51000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
! CALL DESCINIT(DESCC,nd_bez, nd_bez, NB, NB, 0, 0, CONTEXT, LDA_dee, INFO_C )
! if(INFO_C .NE. 0) then
! write(6,*) "Something is wrong in d_ee_computing CALL DESCINIT DESCC, INFO_C=",INFO_C
! stop
! endif
!=================================================
call output
!=================================================
! allocate(d_ee(nd_bez,nd_bez), stat=ier)
! IF (IER /= 0) THEN
! WRITE (*,*) "resistive_wall_response, can not allocate local matrix d_ee MYPROC_NUM=",MYPNUM
! STOP
! END IF
! d_ee=0.
! DO i_loc = 1,nd_bez
! DO j_loc = 1,nd_bez
! CALL pdelget('A','D',d_ee(i_loc,j_loc), d_ee_loc,i_loc,j_loc,DESCC)
! if(MYPNUM==0) write(90000+MYPNUM,*) i_loc, j_loc, d_ee(i_loc,j_loc)
! END DO
! END DO
if(rank==0) write(*,*) 'Program FINISH'
if(rank==0) write(*,*) '==============================================================='
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time2=MPI_WTIME()
if(rank==0) write(*,*) 'Program FINISH'
if(rank==0) write(*,*) 'Total wall clock time =', (time2-time1)/3600.0, ' hours'
if(rank==0) write(*,*) '==============================================================='
end subroutine resistive_wall_response
@@ -29,115 +29,43 @@ if(rank==0) write(*,*) ' SOLVER: nd_w : ',nd_w
if(rank==0) write(*,*) ' SOLVER: nd_we : ',nd_we
if(rank==0) write(*,*) ' SOLVER: npot_p : ',npot_p
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time1=MPI_WTIME()
!------------------------------------------------------------------
!!!!!call perfon('ma_pp')
!==================================================================
call matrix_pp
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time2=MPI_WTIME()
if(rank==0)write(110+rank,*) time2-time1
!!!!!call perfoff
!!!!!call perfon('ma_wp')
!==================================================================
call matrix_wp
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time3=MPI_WTIME()
if(rank==0)write(120+rank,*) time3-time2
!!!!!call perfoff
!!!!!call perfon('ma_ww')
!==================================================================
call matrix_ww
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time4=MPI_WTIME()
if(rank==0)write(130+rank,*) time4-time3
!!!!!call perfoff
!!!!!call perfon('ma_rw')
!==================================================================
call matrix_rw
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time5=MPI_WTIME()
if(rank==0)write(140+rank,*) time5-time4
!!!!!call perfoff
if (nu_coil.ne.0) then
!!!!!call perfon('coil')
call matrix_cc
call matrix_cp
call matrix_wc
call matrix_rc
!!!!!call perfoff
endif
!!!!!call perfon('ma_pe')
!==================================================================
call matrix_pe
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time6=MPI_WTIME()
if(rank==0)write(150+rank,*) time6-time5
!!!!!call perfoff
!!!!!call perfon('ma_ep!')
!==================================================================
call matrix_ep
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time7=MPI_WTIME()
if(rank==0)write(160+rank,*) time7-time6
!!!!!call perfoff
!!!!!call perfon('ma_ew')
!==================================================================
call matrix_ew
!==================================================================
!!!!!call perfoff
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time8=MPI_WTIME()
if(rank==0)write(170+rank,*) time8-time7
call MPI_BARRIER(MPI_COMM_WORLD,ier)
if (nu_coil.ne.0) then
!!!!!call perfon('ma_ec')
call matrix_ec
!!!!!call perfoff
endif
if(rank==0) write(*,'(A,i6)') 'SOLVER : npot_p : ',npot_p
@@ -151,89 +79,25 @@ call cholesky_solver
deallocate(a_pp_loc)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time9=MPI_WTIME()
if(rank==0)write(180+rank,*) time9-time8
!==================================================================
call a_pwe_s_computing
!==================================================================
test_sum=0
CALL MPI_ALLREDUCE(sum(a_pwe_loc_s), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(10000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time10=MPI_WTIME()
if(rank==0)write(190+rank,*) time10-time9
!==================================================================
call a_ee_computing
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time11=MPI_WTIME()
if(rank==0)write(200+rank,*) time11-time10
!==================================================================
call a_ew_computing
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time12=MPI_WTIME()
if(rank==0)write(210+rank,*) time12-time11
!==================================================================
call a_we_computing
!==================================================================
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time13=MPI_WTIME()
if(rank==0)write(220+rank,*) time13-time12
call MPI_BARRIER(MPI_COMM_WORLD,ier)
!==================================================================
call matrix_multiplication
!==================================================================
deallocate(a_wp_loc,a_pwe_loc,a_ep_loc_sca)
call MPI_BARRIER(MPI_COMM_WORLD,ier)
time14=MPI_WTIME()
if(rank==0)write(230+rank,*) time14-time13
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_we_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(23000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_ew_loc_sca), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(24000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_ee_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(25000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
test_sum=0
CALL MPI_ALLREDUCE(sum(a_ww_loc), test_sum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ier)
if(rank==0) write(33000+rank,*) test_sum
call MPI_BARRIER(MPI_COMM_WORLD,ier)
end subroutine solver
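The barrier/MPI_WTIME/write sequence repeated between the phases above is a simple per-phase timer: the barrier synchronizes all ranks, the difference of two MPI_WTIME stamps is the phase duration, and rank 0 logs it to a phase-specific unit (110+rank, 120+rank, ...). A sketch of that pattern as a reusable routine (hypothetical, not in the commit):

subroutine phase_time(t_prev, unit_base)
  ! Sketch: log the wall-clock duration of one solver phase on rank 0.
  use mpi_v                          ! provides rank, ier
  implicit none
  include "mpif.h"
  real, intent(inout) :: t_prev      ! time stamp at end of previous phase
  integer, intent(in) :: unit_base   ! e.g. 110 for matrix_pp
  real :: t_now
  call MPI_BARRIER(MPI_COMM_WORLD, ier)
  t_now = MPI_WTIME()
  if (rank == 0) write(unit_base + rank, *) t_now - t_prev
  t_prev = t_now
end subroutine phase_time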
@@ -190,36 +190,43 @@ if(rank==0) write(outp,*)
end do
npot_w = nwuv
npot_w1 = npot_w-1
if(rank==0) write(6,*)
if(rank==0) write(6,*) ' number of wall triangles ntri_w = ',ntri_w
if(rank==0) write(6,*) 'current potential array npot_w = ',npot_w
! open(60, iostat=ier, file='resistive_wall',&
! status='NEW',form='FORMATTED')
! write(60,3000) npot_w/2, 4
! 3000 format(2i7)
! Commented out for better performance; to be uncommented at the end of the project
! do kv=1,nwv
! do ku=1,nwu
! j = ku+nwu*(kv-1)
! i = 2*j-1
! write(60,3300) xw(i ,1),yw(i ,1),zw(i ,1)
! write(60,3300) xw(i ,2),yw(i ,2),zw(i ,2)
! write(60,3300) xw(i+1,1),yw(i+1,1),zw(i+1,1)
! write(60,3300) xw(i ,3),yw(i ,3),zw(i ,3)
! 3300 format(1p3e14.6)
! end do
! end do
! close(60)
! write(32,*) 'Dreiecke'
! write(32,*) 1
! write(32,*) nwu*nwv*2
! do i=1,2*nwu*nwv
! write(32,*) i,1.0
! do k=1,3
! write(32,3300) xw(i,k),yw(i,k),zw(i,k)
! end do
! end do
if(rank==0) then
write(6,*) ' '
write(6,*) 'number of wall triangles ntri_w = ',ntri_w
write(6,*) 'current potential array npot_w = ',npot_w
endif
if (rank==0) then
open(60, iostat=ier, file='resistive_wall',&
status='NEW',form='FORMATTED')
write(60,3000) npot_w/2, 4
3000 format(2i7)
do kv=1,nwv
do ku=1,nwu
j = ku+nwu*(kv-1)
i = 2*j-1
write(60,3300) xw(i ,1),yw(i ,1),zw(i ,1)
write(60,3300) xw(i ,2),yw(i ,2),zw(i ,2)
write(60,3300) xw(i+1,1),yw(i+1,1),zw(i+1,1)
write(60,3300) xw(i ,3),yw(i ,3),zw(i ,3)
3300 format(1p3e14.6)
end do
end do
close(60)
write(32,*) 'Dreiecke'
write(32,*) 1
write(32,*) nwu*nwv*2
do i=1,2*nwu*nwv
write(32,*) i,1.0
do k=1,3
write(32,3300) xw(i,k),yw(i,k),zw(i,k)
end do
end do