Commit 5156a042 authored by Cristian Lalescu

Merge branch 'kraichnan_field' into develop

parents 84deadfe 3d419f12
Pipeline #63960 passed with stage in 4 minutes and 11 seconds
@@ -262,11 +262,13 @@ set(cpp_for_lib
${PROJECT_SOURCE_DIR}/cpp/full_code/direct_numerical_simulation.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVE.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/static_field.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/kraichnan_field.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/joint_acc_vel_stats.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/filter_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/write_filtered_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/Gauss_field_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/symmetrize_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_output_test.cpp
${PROJECT_SOURCE_DIR}/cpp/full_code/get_rfields.cpp
@@ -308,9 +310,11 @@ set(hpp_for_lib
${PROJECT_SOURCE_DIR}/cpp/full_code/direct_numerical_simulation.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/NSVE.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/static_field.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/kraichnan_field.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/joint_acc_vel_stats.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/filter_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/Gauss_field_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/field_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/write_filtered_test.hpp
${PROJECT_SOURCE_DIR}/cpp/full_code/symmetrize_test.hpp
......
@@ -439,7 +439,7 @@ class DNS(_code):
assert (self.parameters['niter_todo'] % self.parameters['niter_stat'] == 0)
assert (self.parameters['niter_todo'] % self.parameters['niter_out'] == 0)
assert (self.parameters['niter_out'] % self.parameters['niter_stat'] == 0)
if self.dns_type in ['NSVEparticles_no_output', 'NSVEcomplex_particles', 'NSVEparticles', 'static_field']:
if self.dns_type in ['NSVEparticles_no_output', 'NSVEcomplex_particles', 'NSVEparticles', 'static_field', 'kraichnan_field']:
assert (self.parameters['niter_todo'] % self.parameters['niter_part'] == 0)
assert (self.parameters['niter_out'] % self.parameters['niter_part'] == 0)
_code.write_par(self, iter0 = iter0)
@@ -627,16 +627,10 @@ class DNS(_code):
parser_NSVE = subparsers.add_parser(
'NSVE',
help = 'plain Navier-Stokes vorticity formulation')
self.simulation_parser_arguments(parser_NSVE)
self.job_parser_arguments(parser_NSVE)
self.parameters_to_parser_arguments(parser_NSVE)
parser_NSVE_no_output = subparsers.add_parser(
'NSVE_no_output',
help = 'plain Navier-Stokes vorticity formulation, checkpoints are NOT SAVED')
self.simulation_parser_arguments(parser_NSVE_no_output)
self.job_parser_arguments(parser_NSVE_no_output)
self.parameters_to_parser_arguments(parser_NSVE_no_output)
parser_NSVEparticles_no_output = subparsers.add_parser(
'NSVEparticles_no_output',
@@ -646,6 +640,10 @@ class DNS(_code):
'static_field',
help = 'static field with basic fluid tracers')
parser_kraichnan_field = subparsers.add_parser(
'kraichnan_field',
help = 'Kraichnan field with basic fluid tracers')
parser_NSVEp2 = subparsers.add_parser(
'NSVEparticles',
help = 'plain Navier-Stokes vorticity formulation, with basic fluid tracers')
@@ -653,22 +651,47 @@ class DNS(_code):
parser_NSVEp2p = subparsers.add_parser(
'NSVEcomplex_particles',
help = 'plain Navier-Stokes vorticity formulation, with oriented active particles')
parser_NSVEp_extra = subparsers.add_parser(
'NSVEp_extra_sampling',
help = 'plain Navier-Stokes vorticity formulation, with basic fluid tracers, that sample velocity gradient, as well as pressure and its derivatives.')
parser_NSVE_ou = subparsers.add_parser(
'NSVE_ou_forcing',
help = 'plain Navier-Stokes vorticity formulation, with ornstein-uhlenbeck forcing')
for parser in ['NSVEparticles_no_output', 'NSVEp2', 'NSVEp2p', 'NSVEp_extra', 'static_field']:
for parser in [
'NSVE',
'NSVE_no_output',
'NSVEparticles_no_output',
'NSVEp2',
'NSVEp2p',
'NSVEp_extra',
'static_field',
'kraichnan_field']:
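# the loop variables are only name suffixes; each eval call below expands to
# e.g. self.simulation_parser_arguments(parser_kraichnan_field)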
eval('self.simulation_parser_arguments({0})'.format('parser_' + parser))
eval('self.job_parser_arguments({0})'.format('parser_' + parser))
eval('self.particle_parser_arguments({0})'.format('parser_' + parser))
eval('self.parameters_to_parser_arguments({0})'.format('parser_' + parser))
eval('self.parameters_to_parser_arguments('
'parser_{0},'
'self.generate_extra_parameters(\'{0}\'))'.format(parser))
for parser in [
'NSVEparticles_no_output',
'NSVEp2',
'NSVEp2p',
'NSVEp_extra',
'static_field',
'kraichnan_field']:
eval('self.particle_parser_arguments({0})'.format('parser_' + parser))
eval('self.parameters_to_parser_arguments('
'parser_{0},'
'self.NSVEp_extra_parameters)'.format(parser))
return None
def generate_extra_parameters(
self,
dns_type):
pars = {}
if dns_type == 'kraichnan_field':
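# defaults for the prescribed Gaussian velocity field; the spectrum parameters
# correspond to E(k) = spectrum_coefficient * k**spectrum_slope * exp(-k / spectrum_k_cutoff),
# the same form checked by the Gauss_field_test script further down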
pars['output_velocity'] = int(1)
pars['field_random_seed'] = int(1)
pars['spectrum_slope'] = float(-5./3)
pars['spectrum_k_cutoff'] = float(16)
pars['spectrum_coefficient'] = float(1)
return pars
def prepare_launch(
self,
args = [],
@@ -703,13 +726,16 @@ class DNS(_code):
self.dns_type = opt.DNS_class
self.name = self.dns_type + '-' + self.fluid_precision + '-v' + TurTLE.__version__
# merge parameters if needed
if self.dns_type in ['NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling', 'static_field']:
if self.dns_type in ['NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling', 'static_field', 'kraichnan_field']:
for k in self.NSVEp_extra_parameters.keys():
self.parameters[k] = self.NSVEp_extra_parameters[k]
if type(extra_parameters) != type(None):
if self.dns_type in extra_parameters.keys():
for k in extra_parameters[self.dns_type].keys():
self.parameters[k] = extra_parameters[self.dns_type][k]
additional_parameters = self.generate_extra_parameters(self.dns_type)
for k in additional_parameters.keys():
self.parameters[k] = additional_parameters[k]
if ((self.parameters['niter_todo'] % self.parameters['niter_out']) != 0):
self.parameters['niter_out'] = self.parameters['niter_todo']
if len(opt.src_work_dir) == 0:
@@ -739,7 +765,10 @@ class DNS(_code):
opt.nz > opt.n):
opt.n = min(opt.nx, opt.ny, opt.nz)
print("Warning: '-n' parameter changed to minimum of nx, ny, nz. This affects the computation of nu.")
self.parameters['dt'] = (opt.dtfactor / opt.n)
if self.dns_type in ['kraichnan_field']:
self.parameters['dt'] = opt.dtfactor * 0.5 / opt.n**2
else:
self.parameters['dt'] = (opt.dtfactor / opt.n)
self.parameters['nu'] = (opt.kMeta * 2 / opt.n)**(4./3)
# check value of kMax
kM = opt.n * 0.5
@@ -768,7 +797,7 @@ class DNS(_code):
# hardcoded FFTW complex representation size
field_size = 3*(opt.nx+2)*opt.ny*opt.nz*self.fluid_dtype.itemsize
checkpoint_size = field_size
if self.dns_type in ['static_field', 'NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling']:
if self.dns_type in ['kraichnan_field', 'static_field', 'NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling']:
rhs_size = self.parameters['tracers0_integration_steps']
if type(opt.tracers0_integration_steps) != type(None):
rhs_size = opt.tracers0_integration_steps
@@ -1004,19 +1033,27 @@ class DNS(_code):
# take care of fields' initial condition
# first, check if initial field exists
need_field = False
if not os.path.exists(self.get_checkpoint_0_fname()):
need_field = True
else:
f = h5py.File(self.get_checkpoint_0_fname(), 'r')
try:
dset = f['vorticity/complex/0']
need_field = (dset.shape == (self.parameters['ny'],
self.parameters['nz'],
self.parameters['nx']//2+1,
3))
except:
if self.dns_type in [
'NSVE',
'NSVE_no_output',
'static_field',
'NSVEparticles',
'NSVEcomplex_particles',
'NSVEparticles_no_output',
'NSVEp_extra_sampling']:
if not os.path.exists(self.get_checkpoint_0_fname()):
need_field = True
f.close()
else:
f = h5py.File(self.get_checkpoint_0_fname(), 'r')
try:
dset = f['vorticity/complex/0']
need_field = (dset.shape == (self.parameters['ny'],
self.parameters['nz'],
self.parameters['nx']//2+1,
3))
except:
need_field = True
f.close()
if need_field:
f = h5py.File(self.get_checkpoint_0_fname(), 'a')
if len(opt.src_simname) > 0:
@@ -1043,8 +1080,19 @@ class DNS(_code):
amplitude = 0.05)
f['vorticity/complex/{0}'.format(0)] = data
f.close()
if self.dns_type == 'kraichnan_field':
if not os.path.exists(self.get_checkpoint_0_fname()):
f = h5py.File(self.get_checkpoint_0_fname(), 'a')
f.create_group('velocity/real')
f.close()
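# for 'kraichnan_field' no vorticity initial condition is required; only an empty
# 'velocity/real' group is created here, and the Gaussian velocity field itself is
# presumably generated on the C++ side (see make_gaussian_random_field in the
# field.cpp changes below)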
# now take care of particles' initial condition
if self.dns_type in ['static_field', 'NSVEparticles', 'NSVEcomplex_particles', 'NSVEparticles_no_output', 'NSVEp_extra_sampling']:
if self.dns_type in [
'kraichnan_field',
'static_field',
'NSVEparticles',
'NSVEcomplex_particles',
'NSVEparticles_no_output',
'NSVEp_extra_sampling']:
self.generate_particle_data(opt = opt)
return None
def launch_jobs(
@@ -1053,6 +1101,9 @@ class DNS(_code):
if not os.path.exists(self.get_data_file_name()):
self.generate_initial_condition(opt = opt)
self.write_par()
if (('test' in self.dns_type) or
(self.dns_type in ['kraichnan_field'])):
self.check_current_vorticity_exists = False
self.run(
nb_processes = opt.nb_processes,
nb_threads_per_process = opt.nb_threads_per_process,
......
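For orientation, the new 'kraichnan_field' class plugs into the existing DNS command-line interface, with the generate_extra_parameters defaults above exposed as options. Below is a minimal, hypothetical launch sketch (not part of the commit): grid size, seed and spectrum values are illustrative, and it assumes TurTLE is installed and that DNS.launch accepts an argument list in the same way TEST.launch does in the Gauss_field_test script included further down.

#! /usr/bin/env python
# hypothetical usage sketch for the new 'kraichnan_field' DNS class;
# parameter values are illustrative, not taken from this commit
import sys
from TurTLE import DNS

def main():
    c = DNS()
    # the spectrum options override the generate_extra_parameters defaults
    c.launch(
            ['kraichnan_field',
             '--nx', '64',
             '--ny', '64',
             '--nz', '64',
             '--simname', 'kraichnan_example',
             '--spectrum_slope', '-1.6667',
             '--spectrum_k_cutoff', '16.',
             '--spectrum_coefficient', '1.',
             '--field_random_seed', '2'] +
            sys.argv[1:])
    return None

if __name__ == '__main__':
    main()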
@@ -51,6 +51,7 @@ class TEST(_code):
work_dir = work_dir,
simname = simname)
self.generate_default_parameters()
self.check_current_vorticity_exists = False
return None
def set_precision(
self,
@@ -118,7 +119,7 @@ class TEST(_code):
self.parameters['dky'] = float(1.0)
self.parameters['dkz'] = float(1.0)
self.parameters['filter_length'] = float(1.0)
self.parameters['random_seed'] = int(1)
self.parameters['field_random_seed'] = int(1)
return None
def generate_extra_parameters(
self,
@@ -129,6 +130,12 @@ class TEST(_code):
pars['tracers0_integration_steps'] = int(4)
pars['tracers0_neighbours'] = int(1)
pars['tracers0_smoothness'] = int(1)
if dns_type == 'Gauss_field_test':
pars['histogram_bins'] = int(129)
pars['max_velocity_estimate'] = float(8)
pars['spectrum_slope'] = float(-5./3)
pars['spectrum_k_cutoff'] = float(16)
pars['spectrum_coefficient'] = float(1)
return pars
def get_kspace(self):
kspace = {}
@@ -175,6 +182,83 @@ class TEST(_code):
kspace = self.get_kspace()
nshells = kspace['nshell'].shape[0]
ofile['checkpoint'] = int(0)
vec_spectra_stats = []
tens_rspace_stats = []
vec4_rspace_stats = []
scal_rspace_stats = []
if self.dns_type in ['Gauss_field_test']:
vec_spectra_stats.append('velocity')
vec4_rspace_stats.append('velocity')
tens_rspace_stats.append('velocity_gradient')
scal_rspace_stats.append('velocity_divergence')
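# chunk sizes below are chosen so that one chunk stays at or below 2**20 bytes
# (8 bytes per float64/int64 entry times the number of entries per sample),
# with at least one sample per chunk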
for k in vec_spectra_stats:
time_chunk = 2**20//(8*3*3*nshells)
time_chunk = max(time_chunk, 1)
ofile.create_dataset('statistics/spectra/' + k + '_' + k,
(1, nshells, 3, 3),
chunks = (time_chunk, nshells, 3, 3),
maxshape = (None, nshells, 3, 3),
dtype = np.float64)
for k in scal_rspace_stats:
time_chunk = 2**20//(8*10)
time_chunk = max(time_chunk, 1)
a = ofile.create_dataset('statistics/moments/' + k,
(1, 10),
chunks = (time_chunk, 10),
maxshape = (None, 10),
dtype = np.float64)
time_chunk = 2**20//(8*self.parameters['histogram_bins'])
time_chunk = max(time_chunk, 1)
ofile.create_dataset('statistics/histograms/' + k,
(1,
self.parameters['histogram_bins']),
chunks = (time_chunk,
self.parameters['histogram_bins']),
maxshape = (None,
self.parameters['histogram_bins']),
dtype = np.int64)
for k in vec4_rspace_stats:
time_chunk = 2**20//(8*4*10)
time_chunk = max(time_chunk, 1)
a = ofile.create_dataset('statistics/moments/' + k,
(1, 10, 4),
chunks = (time_chunk, 10, 4),
maxshape = (None, 10, 4),
dtype = np.float64)
time_chunk = 2**20//(8*4*self.parameters['histogram_bins'])
time_chunk = max(time_chunk, 1)
ofile.create_dataset('statistics/histograms/' + k,
(1,
self.parameters['histogram_bins'],
4),
chunks = (time_chunk,
self.parameters['histogram_bins'],
4),
maxshape = (None,
self.parameters['histogram_bins'],
4),
dtype = np.int64)
for k in tens_rspace_stats:
time_chunk = 2**20//(8*9*10)
time_chunk = max(time_chunk, 1)
a = ofile.create_dataset('statistics/moments/' + k,
(1, 10, 3, 3),
chunks = (time_chunk, 10, 3, 3),
maxshape = (None, 10, 3, 3),
dtype = np.float64)
time_chunk = 2**20//(8*9*self.parameters['histogram_bins'])
time_chunk = max(time_chunk, 1)
ofile.create_dataset('statistics/histograms/' + k,
(1,
self.parameters['histogram_bins'],
3, 3),
chunks = (time_chunk,
self.parameters['histogram_bins'],
3, 3),
maxshape = (None,
self.parameters['histogram_bins'],
3, 3),
dtype = np.int64)
return None
def job_parser_arguments(
self,
@@ -257,6 +341,9 @@ class TEST(_code):
dest = 'TEST_class',
help = 'type of simulation to run')
subparsers.required = True
parser_Gauss_field_test = subparsers.add_parser(
'Gauss_field_test',
help = 'test incompressible Gaussian random fields')
parser_filter_test = subparsers.add_parser(
'filter_test',
help = 'plain filter test')
@@ -275,23 +362,21 @@ class TEST(_code):
parser_test_interpolation = subparsers.add_parser(
'test_interpolation',
help = 'test velocity gradient interpolation')
parser_ornstein_uhlenbeck_test = subparsers.add_parser(
'ornstein_uhlenbeck_test',
help = 'test ornstein uhlenbeck process')
for parser in ['parser_filter_test',
'parser_write_filtered_test',
'parser_field_test',
'parser_symmetrize_test',
'parser_field_output_test',
'parser_test_interpolation',
'parser_ornstein_uhlenbeck_test']:
eval('self.simulation_parser_arguments(' + parser + ')')
eval('self.job_parser_arguments(' + parser + ')')
eval('self.parameters_to_parser_arguments(' + parser + ')')
eval('self.parameters_to_parser_arguments(' + parser + ',' +
'parameters = self.generate_extra_parameters(dns_type = \'' + parser + '\'))')
for parser in ['Gauss_field_test',
'filter_test',
'write_filtered_test',
'field_test',
'symmetrize_test',
'field_output_test',
'test_interpolation',
'ornstein_uhlenbeck_test']:
eval('self.simulation_parser_arguments(parser_' + parser + ')')
eval('self.job_parser_arguments(parser_' + parser + ')')
eval('self.parameters_to_parser_arguments(parser_' + parser + ')')
return None
def prepare_launch(
self,
@@ -300,6 +385,8 @@ class TEST(_code):
self.set_precision(opt.precision)
self.dns_type = opt.TEST_class
self.name = self.dns_type + '-' + self.fluid_precision + '-v' + TurTLE.__version__
self.parameters.update(
self.generate_extra_parameters(dns_type = self.dns_type))
# merge parameters if needed
self.pars_from_namespace(opt)
return opt
@@ -308,8 +395,6 @@ class TEST(_code):
args = [],
**kwargs):
opt = self.prepare_launch(args = args)
self.parameters.update(
self.generate_extra_parameters(dns_type = self.dns_type))
self.launch_jobs(opt = opt, **kwargs)
return None
def launch_jobs(
......
@@ -176,6 +176,7 @@ class _code(_base):
"""
self.host_info = host_info
self.main = ''
self.check_current_vorticity_exists = True
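# subclasses that do not read a vorticity checkpoint switch this off
# (see TEST.__init__ and the kraichnan_field / test branch of DNS.launch_jobs above)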
return None
def write_src(self):
with open(self.name + '.cpp', 'w') as outfile:
@@ -278,7 +279,7 @@ class _code(_base):
if os.path.exists(os.path.join(self.work_dir, 'stop_' + self.simname)):
warnings.warn("Found stop_<simname> file, I will remove it.")
os.remove(os.path.join(self.work_dir, 'stop_' + self.simname))
if not 'test' in self.name:
if self.check_current_vorticity_exists:
with h5py.File(os.path.join(self.work_dir, self.simname + '.h5'), 'r') as data_file:
iter0 = data_file['iteration'][...]
checkpoint0 = data_file['checkpoint'][()]
......
#! /usr/bin/env python
import numpy as np
from scipy import trapz
from scipy.stats import norm
from scipy.integrate import quad
import h5py
import sys
import time
import TurTLE
from TurTLE import TEST
try:
import matplotlib.pyplot as plt
except:
plt = None
def main():
c = TEST()
# size of grid
n = 256
slope = -5./3.
k_cutoff = 30.
func = lambda k, k_c=k_cutoff, s=slope : k**s*np.exp(-k/k_c)
total_energy = quad(func, 1, k_cutoff*4)[0]
coeff = 1./total_energy
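# coeff rescales the model spectrum k**slope * exp(-k/k_cutoff) so that its
# integral over [1, 4*k_cutoff] (computed with quad above) equals one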
bin_no = 100
rseed = int(time.time())
c.launch(
['Gauss_field_test',
'--nx', str(n),
'--ny', str(n),
'--nz', str(n),
'--simname', 'Gaussianity_test',
'--np', '4',
'--ntpp', '1',
'--wd', './',
'--histogram_bins', str(bin_no),
'--max_velocity_estimate', '8.',
'--spectrum_slope', str(slope),
'--spectrum_k_cutoff', str(k_cutoff),
'--spectrum_coefficient', str(coeff),
'--field_random_seed', str(rseed)] +
sys.argv[1:])
plot_stuff(c.simname, total_energy = total_energy)
return None
def plot_stuff(simname, total_energy = 1.):
df = h5py.File(simname + '.h5', 'r')
for kk in ['spectrum_slope',
'spectrum_k_cutoff',
'spectrum_coefficient',
'field_random_seed',
'histogram_bins']:
print(kk, df['parameters/' + kk][...])
coeff = df['parameters/spectrum_coefficient'][()]
k_cutoff = df['parameters/spectrum_k_cutoff'][()]
slope = df['parameters/spectrum_slope'][()]
bin_no = df['parameters/histogram_bins'][()]
f = plt.figure()
# test spectrum
a = f.add_subplot(121)
kk = df['kspace/kshell'][...]
print('dk: {}'.format(df['kspace/dk'][()]))
phi_ij = df['statistics/spectra/velocity_velocity'][0] / df['kspace/dk'][()]
energy = (phi_ij[..., 0, 0] + phi_ij[..., 1, 1] + phi_ij[..., 2, 2])/2
a.scatter(kk[1:-2], energy[1:-2])
a.plot(kk[1:-2], coeff*kk[1:-2]**slope*np.exp(-kk[1:-2]/k_cutoff), ls='--', c='C0')
a.set_xscale('log')
a.set_yscale('log')
# test isotropy
a = f.add_subplot(122)
max_vel_estimate = df['parameters/max_velocity_estimate'][()]
velbinsize = 2*max_vel_estimate/bin_no
vel = np.linspace(-max_vel_estimate+velbinsize/2, max_vel_estimate-velbinsize/2, bin_no)
hist_vel = df['statistics/histograms/velocity'][0, :, :3]
f_vel = hist_vel / np.sum(hist_vel, axis=0, keepdims=True).astype(float) / velbinsize
print('Energy analytically: {}'.format(total_energy))
print('Energy sum: {}'.format(np.sum(energy*df['kspace/dk'][()])))
print('Moment sum: {}'.format(df['statistics/moments/velocity'][0,2,3]/2))
print('Velocity variances: {}'.format(trapz(vel[:,None]**2*f_vel, vel[:,None], axis=0)))
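# printed checks: total energy from the analytic spectrum, from the measured
# spectrum and from the second-order velocity moment, plus the per-component
# variances recovered from the velocity histograms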
vel_variance = df['statistics/moments/velocity'][0,2,3]/3.
a.plot(vel[:,None]/np.sqrt(vel_variance), f_vel*np.sqrt(vel_variance))
a.plot(vel/np.sqrt(vel_variance), norm.pdf(vel/np.sqrt(vel_variance)), ls='--')
a.set_yscale('log')
a.set_xlim(-5,5)
a.set_ylim(1e-5,1)
f.tight_layout()
f.savefig('spectrum_isotropy_test.pdf')
plt.close(f)
### check divergence
print('Divergence second moment is: {0}'.format(
df['statistics/moments/velocity_divergence'][0, 2]))
print('Gradient second moment is: {0}'.format(
df['statistics/moments/velocity_gradient'][0, 2].mean()))
df.close()
return None
if __name__ == '__main__':
main()
@@ -29,6 +29,7 @@
#include <cstdlib>
#include <algorithm>
#include <cassert>
#include <random>
#include "field.hpp"
#include "scope_timer.hpp"
#include "shared_array.hpp"
@@ -1500,7 +1501,7 @@ int compute_gradient(
assert(!src->real_space_representation);
assert((fc1 == ONE && fc2 == THREE) ||
(fc1 == THREE && fc2 == THREExTHREE));
std::fill_n(dst->get_rdata(), dst->rmemlayout->local_size, 0);
*dst = 0.0;
dst->real_space_representation = false;
switch(fc1)
{
@@ -1547,6 +1548,34 @@ int compute_gradient(
return EXIT_SUCCESS;
}
template <typename rnumber,
field_backend be,
kspace_dealias_type dt>
int compute_divergence(
kspace<be, dt> *kk,
field<rnumber, be, THREE> *src,
field<rnumber, be, ONE> *dst)
{
TIMEZONE("compute_divergence");
assert(!src->real_space_representation);
*dst = 0.0;
dst->real_space_representation = false;
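// div(u) in Fourier space is i k . u_hat; the multiplication by i is written out
// explicitly below: real part <- -(k . Im u_hat), imaginary part <- +(k . Re u_hat)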
kk->CLOOP_K2(
[&](ptrdiff_t cindex,
ptrdiff_t xindex,
ptrdiff_t yindex,
ptrdiff_t zindex,
double k2){
dst->cval(cindex, 0) = -(kk->kx[xindex]*src->cval(cindex, 0, 1) +
kk->ky[yindex]*src->cval(cindex, 1, 1) +
kk->kz[zindex]*src->cval(cindex, 2, 1));
dst->cval(cindex, 1) = (kk->kx[xindex]*src->cval(cindex, 0, 0) +
kk->ky[yindex]*src->cval(cindex, 1, 0) +
kk->kz[zindex]*src->cval(cindex, 2, 0));
});
return EXIT_SUCCESS;
}
template <typename rnumber,
field_backend be,
kspace_dealias_type dt>
@@ -2075,6 +2104,77 @@ field<rnumber, be, fc> &field<rnumber, be, fc>::operator=(
return *this;
}
template <typename rnumber,
field_backend be,
field_components fc,
kspace_dealias_type dt>
int make_gaussian_random_field(
kspace<be, dt> *kk,
field<rnumber, be, fc> *output_field,
const int rseed,
const double slope,
const double k_cutoff,
const double coefficient)
{
TIMEZONE("make_gaussian_random_field");
// initialize a separate random number generator for each thread
std::vector<std::mt19937_64> rgen;
std::normal_distribution<rnumber> rdist;
rgen.resize(omp_get_max_threads());
// seed random number generators such that no seed is ever repeated if we change the value of rseed.
// basically use a multi-dimensional array indexing technique to come up with actual seed.
// Note: this method IS NOT MPI/OpenMP-invariant!
for (int thread_id=0; thread_id < omp_get_max_threads(); thread_id++)
{
int current_seed = (