Commit 92120543 authored by Cristian Lalescu

Merge branch 'develop' into feature/collisions

I want the merge in the feature branch because I want to ensure we're
using the same mechanism for custom code etc.
parents 20cc0f06 a11e94f0
......@@ -54,6 +54,7 @@ project(TurTLE
LANGUAGES CXX)
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH})
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/morse ${CMAKE_MODULE_PATH})
set(TURTLE_LIBS "")
......@@ -62,6 +63,79 @@ if(NDEBUG)
add_definitions(-DNDEBUG)
endif()
# taken from https://vicrucann.github.io/tutorials/quick-cmake-doxygen/
# indicate the documentation build as an option (OFF by default here)
option(BUILD_DOC "Build documentation" OFF)
# check if Doxygen is installed
find_package(Doxygen)
if (DOXYGEN_FOUND)
# set input and output files
set(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/documentation/cpp/cpp_config)
set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
# request to configure the file
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
message("Doxygen build started")
# a dedicated target is used (without the ALL option, so the docs are only built on demand)
add_custom_target( doc_doxygen
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM )
else (DOXYGEN_FOUND)
message("Doxygen needs to be installed to generate the doxygen documentation")
endif (DOXYGEN_FOUND)
# check if sphinx is available
find_package(Sphinx)
if (SPHINX_FOUND)
if(NOT DEFINED SPHINX_THEME)
set(SPHINX_THEME default)
endif()
if(NOT DEFINED SPHINX_THEME_DIR)
set(SPHINX_THEME_DIR)
endif()
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx_build")
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx_doctrees")
# HTML output directory
set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx_html")
# LaTeX output directory
set(SPHINX_LATEX_DIR "${CMAKE_CURRENT_BINARY_DIR}/sphinx_latex")
configure_file(
"${PROJECT_SOURCE_DIR}/documentation/conf.py.in"
"${BINARY_BUILD_DIR}/conf.py"
@ONLY)
add_custom_target(doc_html
${SPHINX_EXECUTABLE}
-q -b html
-c "${BINARY_BUILD_DIR}"
-d "${SPHINX_CACHE_DIR}"
"${PROJECT_SOURCE_DIR}/documentation"
"${SPHINX_HTML_DIR}"
COMMENT "Building HTML documentation with Sphinx")
add_custom_target(doc_latex
${SPHINX_EXECUTABLE}
-q -b latex
-c "${BINARY_BUILD_DIR}"
-d "${SPHINX_CACHE_DIR}"
"${PROJECT_SOURCE_DIR}/documentation"
"${SPHINX_LATEX_DIR}"
COMMENT "Building LaTeX documentation with Sphinx")
else (SPHINX_FOUND)
message("Sphinx needs to be installed to generate the full documentation")
endif (SPHINX_FOUND)
option(TIMING_OUTPUT "Toggle timing output. WARNING: memory usage is proportional to `niter_todo`" OFF)
if(TIMING_OUTPUT)
add_definitions(-DUSE_TIMINGOUTPUT)
......@@ -188,10 +262,13 @@ set(cpp_for_lib
${PROJECT_SOURCE_DIR}/cpp/full_code/direct_numerical_simulation.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVE.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/static_field.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/kraichnan_field.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/joint_acc_vel_stats.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/filter_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/write_filtered_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/Gauss_field_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/symmetrize_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_output_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/get_rfields.cpp
......@@ -224,16 +301,22 @@ set(cpp_for_lib
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVEparticles.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVEcomplex_particles.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVEp_extra_sampling.cpp
${PROJECT_SOURCE_DIR}/cpp/particles/particles_inner_computer.cpp)
${PROJECT_SOURCE_DIR}/cpp/particles/particles_inner_computer.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/ornstein_uhlenbeck_process.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/ou_vorticity_equation.cpp)
set(hpp_for_lib
${PROJECT_SOURCE_DIR}/cpp/full_code/code_base.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/direct_numerical_simulation.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVE.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/static_field.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/kraichnan_field.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/joint_acc_vel_stats.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/filter_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/Gauss_field_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/write_filtered_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/symmetrize_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_output_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/get_rfields.hpp
......@@ -303,6 +386,8 @@ set(hpp_for_lib
${PROJECT_SOURCE_DIR}/cpp/omputils.hpp
${PROJECT_SOURCE_DIR}/cpp/shared_array.hpp
${PROJECT_SOURCE_DIR}/cpp/spline.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/ornstein_uhlenbeck_process.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/ou_vorticity_equation.hpp
)
#file(GLOB_RECURSE hpp_for_lib ${PROJECT_SOURCE_DIR}/*.hpp)
LIST(APPEND source_files ${hpp_for_lib} ${cpp_for_lib})
......@@ -334,4 +419,3 @@ else()
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/pc_host_info.py ${PROJECT_BINARY_DIR}/python/TurTLE/host_info.py)")
endif()
install(CODE "execute_process(COMMAND python3 ${PROJECT_SOURCE_DIR}/setup.py install --force --prefix=${CMAKE_INSTALL_PREFIX} WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/python/)")
.. image:: https://gitlab.mpcdf.mpg.de/mjr/bfps/badges/develop/build.svg
:target: https://gitlab.mpcdf.mpg.de/mjr/bfps/commits/develop
.. only:: html
.. image:: https://gitlab.mpcdf.mpg.de/mjr/bfps/badges/develop/build.svg
:target: https://gitlab.mpcdf.mpg.de/mjr/bfps/commits/develop
=========================================
Turbulence Tools: Lagrangian and Eulerian
=========================================
In brief, this code runs pseudospectral direct numerical simulations
(DNS) of the incompressible Navier-Stokes equations, using FFTW 3, and
it can integrate particle trajectories in the resulting fields.
The Navier-Stokes solver has been extensively tested (tests are included
in the repository), and it is working as expected.
Parameters and statistics are stored in HDF5 format, together with code
information, so simulation data should be "future proof" --- suggestions
of possible improvements to the current approach are always welcome.
The primary aim of TurTLE is to reduce the time spent on setting up and
babysitting DNS, as well as to simplify the analysis of the generated
data.
The goal is for this Python package to provide an easy and general way
of constructing efficient, specialized DNS C++ codes for different
turbulence problems encountered in research.
At the same time, the package should provide a unified way of
postprocessing and of accessing the postprocessing results.
The code therefore consists of two main parts: the pure C++ code, a set
of loosely related "building blocks", and the Python code, which can
generate C++ code from these building blocks with a significant degree
of flexibility.
The user is expected to write a small Python script that properly
defines the DNS they are interested in running.
That code will generate an executable that can then be run directly on
the user's machine, or submitted to a queue on a cluster.
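To make this concrete, here is a minimal sketch of such a script; the
``DNS.launch`` call and the specific flags are assumptions made for
illustration, mirroring the ``argparse`` subcommands defined in
``TurTLE/DNS.py``, and not canonical values:

.. code:: python

    # hypothetical minimal launcher script (illustrative values only)
    from TurTLE import DNS

    c = DNS()
    c.launch([
            'NSVE',                   # plain Navier-Stokes vorticity formulation
            '-n', '64',               # assumed flag for the grid resolution
            '--simname', 'example_run',
            '--niter_todo', '512'])   # assumed flag, number of iterations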
.. _sec-introduction:
------------
Introduction
------------
TurTLE implements a number of standard functionalities of Fourier-based
pseudo-spectral numerical simulations, as well as the corresponding
numerical particle-tracking functionality.
The package also contains a Navier-Stokes solver, as well as a small
number of post-processing tools.
The solver is production-ready, having already been used for a number of
publications.
TurTLE is written in C++ and it uses a hybrid MPI/OpenMP programming
paradigm, relying on FFTW3 for an efficient Fourier transform
implementation.
HDF5 is used for I/O, including for parameter input and output of
statistics.
CMake is used for compilation and installation.
A Python 3 wrapper is used to prepare parameters and initial conditions,
as well as to generate job submission scripts for clusters and to
perform basic post-processing of simulation results.
The primary aim of TurTLE is to provide optimal performance, while
reducing the time spent on setting up and supervising ensembles of DNS,
with the added benefit of a unified launch-postprocess approach through
the use of the Python wrapper.
.. _sec-installation:
......@@ -40,51 +43,41 @@ the user's machine, or submitted to a queue on a cluster.
Installation
------------
So far, the code has been run on laptops, desktops, and a couple of
clusters (biggest run so far was 1536^3 on 16 nodes of 32 cores each,
with about 11 seconds per time step, for a simple incompressible
Navier-Stokes problem).
Postprocessing data may not be very computationally intensive, depending
on the amount of data involved.
TurTLE can be used on various machines: laptops are routinely used for
development and testing, while large production runs use tens of
thousands of CPU cores on large computing clusters.
**Postprocessing only**
Use a console; navigate to the ``turtle`` folder, and type:
The Python 3 package may be installed directly if only post-processing
of existing data is desired:
.. code:: bash
python setup.py install
(add `--user` or `sudo` as appropriate).
`setup.py` should tell you about the various packages you need.
`setup.py` uses the `setuptools` package for dependency resolution.
**Full installation**
If you want to run simulations on the machine where you're installing,
you will need to use `cmake` to compile and install the full library.
Your machine needs to have an MPI compiler installed, the HDF5 C library
and FFTW >= 3.4 --- detailed instructions are
included at the end of this document.
The C++ library requires a number of dependencies that `CMake` will
search for before compilation and installation.
In brief, an MPI compiler is required, as well as the HDF5 C library
compiled with parallel support and FFTW >= 3.4.
Detailed instructions are provided at the end of this document.
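For orientation, a typical out-of-source build might look as follows
(only a sketch; the configure options and prerequisites are covered by
the detailed instructions):

.. code:: bash

    mkdir build
    cd build
    cmake .. -DCMAKE_INSTALL_PREFIX=${PREFIX}
    make
    make install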
-------------
Documentation
-------------
While the code is not fully documented yet, basic information is already
available, and it is recommended that you generate the manual and go
through it carefully.
Please do ask for specific improvements to the current text where it is
found lacking.
In order to generate the manual, navigate to the repository folder, and
execute the following commands:
.. code:: bash
cd documentation
make latexpdf
Optionally, HTML documentation can be generated instead; just type
``make html`` instead of ``make latexpdf``.
Partial documentation is available (link to online documentation coming
soon).
Doxygen is used for the C++ source code, and the `Sphinx` and `breathe`
Python packages are used to merge the Python wrapper documentation with
the C++ documentation.
The optional `CMake` targets `doc_doxygen`, `doc_html` and `doc_latex`
generate the required documents in the build directory.
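For example, assuming a configured build directory and a Makefile
generator, the targets can be invoked as:

.. code:: bash

    cd build
    make doc_doxygen   # C++ API documentation via Doxygen
    make doc_html      # HTML manual via Sphinx
    make doc_latex     # LaTeX sources for the PDF manual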
--------
Comments
......@@ -104,9 +97,9 @@ Comments
* code is used mainly with Python 3.5 and later, and it is not tested at
all with Python 2.x
-------------------------------
Installation with prerequisites
-------------------------------
-----------------
Full installation
-----------------
These installation steps assume that you have a working MPI compiler,
properly configured on your system (i.e. the various configure scripts
......@@ -166,7 +159,8 @@ Detailed full installation instructions:
TurTLE will try to find FFTW using the FindFFTW module from the Morse project.
If the package is installed in a non-standard location, it is recommended
to set up the environment variables: `FFTW_DIR`, `FFTW_INCDIR` and `FFTW_LIBDIR`.
to set up the environment variables: `FFTW_DIR`, `FFTW_INCDIR`,
`FFTW_LIBDIR`, `FFTW_OPENMP_LIBDIR` and `FFTW_MPI_LIBDIR`.
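For example (the paths below are placeholders):

.. code:: bash

    export FFTW_DIR=/opt/fftw-3.3.8
    export FFTW_INCDIR=${FFTW_DIR}/include
    export FFTW_LIBDIR=${FFTW_DIR}/lib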
4.
Download, compile, install HDF5 (version 1.8.x, currently available
......@@ -183,7 +177,7 @@ Detailed full installation instructions:
Therefore, if the package is installed in a non-standard location, it is recommended
to set up the environment variable: HDF5_ROOT.
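For example (placeholder path):

.. code:: bash

    export HDF5_ROOT=/opt/hdf5-1.8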
5. TurTLE requires `cmake` version > 3.6 (possibly 3.12).
5. TurTLE requires `cmake` version > 3.12.
If required, download, compile and install cmake, currently
available at https://cmake.org/cmake/resources/software.html.
......@@ -195,7 +189,9 @@ Detailed full installation instructions:
5.
Optional.
We recommend the creation of a virtual python3 environment (also under PREFIX) that will be used for installing TurTLE and dependencies.
We recommend the creation of a virtual python3 environment (also
under PREFIX) that will be used for installing TurTLE and
dependencies.
Please see https://docs.python-guide.org/dev/virtualenvs/.
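A possible sketch of this setup (paths are placeholders):

.. code:: bash

    python3 -m venv ${PREFIX}/turtle-env
    source ${PREFIX}/turtle-env/bin/activate
    pip install --upgrade pip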
6.
......@@ -251,10 +247,11 @@ Detailed full installation instructions:
Scaling tests.
--------------------
Initial scaling data is available at <insert url here>.
Initial scaling data is available at (url coming soon).
Please copy the files to the location `TURTLE_FIELD_DATABASE`.
Separately, please recompile TurTLE after appending "-D USE_TIMINGOUTPUT" to the `TURTLE_COMPILATION_FLAGS` environment variable.
Separately, please recompile TurTLE with the `TIMING_OUTPUT` cmake
option switched to `ON`.
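For example, assuming an existing build directory:

.. code:: bash

    cd build
    cmake -DTIMING_OUTPUT=ON ..
    make install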
Afterwards, please run variations of the following command:
......
......@@ -2,20 +2,20 @@
# #
# Copyright 2015-2019 Max Planck Institute for Dynamics and Self-Organization #
# #
# This file is part of TurTLE. #
# This file is part of TurTLE. #
# #
# TurTLE is free software: you can redistribute it and/or modify #
# TurTLE is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# TurTLE is distributed in the hope that it will be useful, #
# TurTLE is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with TurTLE. If not, see <http://www.gnu.org/licenses/> #
# along with TurTLE. If not, see <http://www.gnu.org/licenses/> #
# #
# Contact: Cristian.Lalescu@ds.mpg.de #
# #
......@@ -27,7 +27,6 @@ import os
import sys
import shutil
import subprocess
import argparse
import h5py
import math
import numpy as np
......@@ -440,7 +439,7 @@ class DNS(_code):
assert (self.parameters['niter_todo'] % self.parameters['niter_stat'] == 0)
assert (self.parameters['niter_todo'] % self.parameters['niter_out'] == 0)
assert (self.parameters['niter_out'] % self.parameters['niter_stat'] == 0)
if self.dns_type in ['NSVEparticles_no_output', 'NSVEcomplex_particles', 'NSVEparticles', 'static_field', 'static_field_with_ghost_collisions']:
if self.dns_type in ['NSVEparticles_no_output', 'NSVEcomplex_particles', 'NSVEparticles', 'static_field', 'static_field_with_ghost_collisions', 'kraichnan_field']:
assert (self.parameters['niter_todo'] % self.parameters['niter_part'] == 0)
assert (self.parameters['niter_out'] % self.parameters['niter_part'] == 0)
_code.write_par(self, iter0 = iter0)
......@@ -630,16 +629,10 @@ class DNS(_code):
parser_NSVE = subparsers.add_parser(
'NSVE',
help = 'plain Navier-Stokes vorticity formulation')
self.simulation_parser_arguments(parser_NSVE)
self.job_parser_arguments(parser_NSVE)
self.parameters_to_parser_arguments(parser_NSVE)
parser_NSVE_no_output = subparsers.add_parser(
'NSVE_no_output',
help = 'plain Navier-Stokes vorticity formulation, checkpoints are NOT SAVED')
self.simulation_parser_arguments(parser_NSVE_no_output)
self.job_parser_arguments(parser_NSVE_no_output)
self.parameters_to_parser_arguments(parser_NSVE_no_output)
parser_NSVEparticles_no_output = subparsers.add_parser(
'NSVEparticles_no_output',
......@@ -653,6 +646,10 @@ class DNS(_code):
'static_field_with_ghost_collisions',
help = 'static field with basic fluid tracers and ghost collisions')
parser_kraichnan_field = subparsers.add_parser(
'kraichnan_field',
help = 'Kraichnan field with basic fluid tracers')
parser_NSVEp2 = subparsers.add_parser(
'NSVEparticles',
help = 'plain Navier-Stokes vorticity formulation, with basic fluid tracers')
......@@ -660,20 +657,49 @@ class DNS(_code):
parser_NSVEp2p = subparsers.add_parser(
'NSVEcomplex_particles',
help = 'plain Navier-Stokes vorticity formulation, with oriented active particles')
parser_NSVEp_extra = subparsers.add_parser(
'NSVEp_extra_sampling',
help = 'plain Navier-Stokes vorticity formulation, with basic fluid tracers, that sample velocity gradient, as well as pressure and its derivatives.')
for parser in ['NSVEparticles_no_output', 'NSVEp2', 'NSVEp2p', 'NSVEp_extra', 'static_field', 'static_field_with_ghost_collisions']:
for parser in [
'NSVE',
'NSVE_no_output',
'NSVEparticles_no_output',
'NSVEp2',
'NSVEp2p',
'NSVEp_extra',
'static_field',
'static_field_with_ghost_collisions',
'kraichnan_field']:
eval('self.simulation_parser_arguments({0})'.format('parser_' + parser))
eval('self.job_parser_arguments({0})'.format('parser_' + parser))
eval('self.particle_parser_arguments({0})'.format('parser_' + parser))
eval('self.parameters_to_parser_arguments({0})'.format('parser_' + parser))
eval('self.parameters_to_parser_arguments('
'parser_{0},'
'self.generate_extra_parameters(\'{0}\'))'.format(parser))
for parser in [
'NSVEparticles_no_output',
'NSVEp2',
'NSVEp2p',
'NSVEp_extra',
'static_field',
'kraichnan_field']:
eval('self.particle_parser_arguments({0})'.format('parser_' + parser))
eval('self.parameters_to_parser_arguments('
'parser_{0},'
'self.NSVEp_extra_parameters)'.format(parser))
return None
def generate_extra_parameters(
self,
dns_type):
pars = {}
if dns_type == 'kraichnan_field':
pars['output_velocity'] = int(1)
pars['field_random_seed'] = int(1)
pars['spectrum_slope'] = float(-5./3)
pars['spectrum_k_cutoff'] = float(16)
pars['spectrum_coefficient'] = float(1)
return pars
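# note: for 'kraichnan_field' these defaults are merged into
# self.parameters by prepare_launch (see below), so e.g.
# spectrum_slope starts out as -5./3 and can then be overridden
# from the command line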
def prepare_launch(
self,
args = [],
......@@ -708,13 +734,23 @@ class DNS(_code):
self.dns_type = opt.DNS_class
self.name = self.dns_type + '-' + self.fluid_precision + '-v' + TurTLE.__version__
# merge parameters if needed
if self.dns_type in ['NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling', 'static_field', 'static_field_with_ghost_collisions']:
if self.dns_type in [
'NSVEparticles',
'NSVEcomplex_particles',
'NSVEparticles_no_output',
'NSVEp_extra_sampling',
'static_field',
'static_field_with_ghost_collisions',
'kraichnan_field']:
for k in self.NSVEp_extra_parameters.keys():
self.parameters[k] = self.NSVEp_extra_parameters[k]
if type(extra_parameters) != type(None):
if self.dns_type in extra_parameters.keys():
for k in extra_parameters[self.dns_type].keys():
self.parameters[k] = extra_parameters[self.dns_type][k]
additional_parameters = self.generate_extra_parameters(self.dns_type)
for k in additional_parameters.keys():
self.parameters[k] = additional_parameters[k]
if ((self.parameters['niter_todo'] % self.parameters['niter_out']) != 0):
self.parameters['niter_out'] = self.parameters['niter_todo']
if len(opt.src_work_dir) == 0:
......@@ -744,7 +780,10 @@ class DNS(_code):
opt.nz > opt.n):
opt.n = min(opt.nx, opt.ny, opt.nz)
print("Warning: '-n' parameter changed to minimum of nx, ny, nz. This affects the computation of nu.")
self.parameters['dt'] = (opt.dtfactor / opt.n)
if self.dns_type in ['kraichnan_field']:
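# the Kraichnan velocity is white in time, so (presumably) a
# diffusive time-step scaling dt ~ 1/n**2 is used here, instead of
# the advective dt ~ 1/n used by the Navier-Stokes solvers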
self.parameters['dt'] = opt.dtfactor * 0.5 / opt.n**2
else:
self.parameters['dt'] = (opt.dtfactor / opt.n)
self.parameters['nu'] = (opt.kMeta * 2 / opt.n)**(4./3)
# check value of kMax
kM = opt.n * 0.5
......@@ -773,7 +812,7 @@ class DNS(_code):
# hardcoded FFTW complex representation size
field_size = 3*(opt.nx+2)*opt.ny*opt.nz*self.fluid_dtype.itemsize
checkpoint_size = field_size
if self.dns_type in ['static_field', 'NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling', 'static_field_with_ghost_collisions']:
if self.dns_type in ['static_field', 'NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling', 'static_field_with_ghost_collisions', 'kraichnan_field']:
rhs_size = self.parameters['tracers0_integration_steps']
if type(opt.tracers0_integration_steps) != type(None):
rhs_size = opt.tracers0_integration_steps
......@@ -1009,19 +1048,27 @@ class DNS(_code):
# take care of fields' initial condition
# first, check if initial field exists
need_field = False
if not os.path.exists(self.get_checkpoint_0_fname()):
need_field = True
else:
f = h5py.File(self.get_checkpoint_0_fname(), 'r')
try:
dset = f['vorticity/complex/0']
need_field = (dset.shape == (self.parameters['ny'],
self.parameters['nz'],
self.parameters['nx']//2+1,
3))
except:
if self.dns_type in [
'NSVE',
'NSVE_no_output',
'static_field',
'NSVEparticles',
'NSVEcomplex_particles',
'NSVEparticles_no_output',
'NSVEp_extra_sampling']:
if not os.path.exists(self.get_checkpoint_0_fname()):
need_field = True
f.close()
else:
f = h5py.File(self.get_checkpoint_0_fname(), 'r')
try:
dset = f['vorticity/complex/0']
need_field = (dset.shape == (self.parameters['ny'],
self.parameters['nz'],
self.parameters['nx']//2+1,
3))
except:
need_field = True
f.close()
if need_field:
f = h5py.File(self.get_checkpoint_0_fname(), 'a')
if len(opt.src_simname) > 0:
......@@ -1048,8 +1095,20 @@ class DNS(_code):
amplitude = 0.05)
f['vorticity/complex/{0}'.format(0)] = data
f.close()
if self.dns_type == 'kraichnan_field':
if not os.path.exists(self.get_checkpoint_0_fname()):
f = h5py.File(self.get_checkpoint_0_fname(), 'a')
f.create_group('velocity/real')
f.close()
# now take care of particles' initial condition
if self.dns_type in ['static_field', 'NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling', 'static_field_with_ghost_collisions']:
if self.dns_type in [
'kraichnan_field',
'static_field',
'static_field_with_ghost_collisions',
'NSVEparticles',
'NSVEcomplex_particles',
'NSVEparticles_no_output',
'NSVEp_extra_sampling']:
self.generate_particle_data(opt = opt)
return None
def launch_jobs(
......@@ -1058,6 +1117,9 @@ class DNS(_code):
if not os.path.exists(self.get_data_file_name()):
self.generate_initial_condition(opt = opt)
self.write_par()
if (('test' in self.dns_type) or
(self.dns_type in ['kraichnan_field'])):
self.check_current_vorticity_exists = False
self.run(
nb_processes = opt.nb_processes,
nb_threads_per_process = opt.nb_threads_per_process,
......@@ -1067,4 +1129,3 @@ class DNS(_code):
no_submit = opt.no_submit,
no_debug = opt.no_debug)
return None
......@@ -51,6 +51,7 @@ class TEST(_code):
work_dir = work_dir,
simname = simname)
self.generate_default_parameters()
self.check_current_vorticity_exists = False
return None
def set_precision(
self,
......@@ -118,7 +119,7 @@ class TEST(_code):
self.parameters['dky'] = float(1.0)
self.parameters['dkz'] = float(1.0)
self.parameters['filter_length'] = float(1.0)
self.parameters['random_seed'] = int(1)
self.parameters['field_random_seed'] = int(1)
return None
def generate_extra_parameters(
self,
......@@ -129,6 +130,12 @@ class TEST(_code):
pars['tracers0_integration_steps'] = int(4)
pars['tracers0_neighbours'] = int(1)
pars['tracers0_smoothness'] = int(1)
if dns_type == 'Gauss_field_test':
pars['histogram_bins'] = int(129)
pars['max_velocity_estimate'] = float(8)
pars['spectrum_slope'] = float(-5./3)
pars['spectrum_k_cutoff'] = float(16)
pars['spectrum_coefficient'] = float(1)
return pars
def get_kspace(self):
kspace = {}
......@@ -175,6 +182,83 @@ class TEST(_code):
kspace = self.get_kspace()
nshells = kspace['nshell'].shape[0]
ofile['checkpoint'] = int(0)
vec_spectra_stats = []
tens_rspace_stats = []
vec4_rspace_stats = []
scal_rspace_stats = []
if self.dns_type in ['Gauss_field_test']:
vec_spectra_stats.append('velocity')
vec4_rspace_stats.append('velocity')
tens_rspace_stats.append('velocity_gradient')
scal_rspace_stats.append('velocity_divergence')
for k in vec_spectra_stats:
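# aim for roughly 1 MiB chunks: 2**20 bytes divided by the size of
# one sample (8-byte float64 times 3*3*nshells values)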
time_chunk = 2**20//(8*3*3*nshells)
time_chunk = max(time_chunk, 1)
ofile.create_dataset('statistics/spectra/' + k + '_' + k,
(1, nshells, 3, 3),
chunks = (time_chunk, nshells, 3, 3),
maxshape = (None, nshells, 3, 3),
dtype = np.float64)
for k in scal_rspace_stats:
time_chunk = 2**20//(8*10)
time_chunk = max(time_chunk, 1)
a = ofile.create_dataset('statistics/moments/' + k,
(1, 10),
chunks = (time_chunk, 10),
maxshape = (None, 10),
dtype = np.float64)
time_chunk = 2**20//(8*self.parameters['histogram_bins'])
time_chunk = max(time_chunk, 1)