diff --git a/CMakeLists.txt b/CMakeLists.txt
index c3a0fd2b0dbc5a1be93c2694fd625fa5897b9206..7afc964105aaaecefeeea9fa4abd1944f2e24751 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -370,7 +370,8 @@ set(cpp_for_lib
     ${PROJECT_SOURCE_DIR}/cpp/particles/inner/particles_inner_computer.cpp
     ${PROJECT_SOURCE_DIR}/cpp/full_code/ornstein_uhlenbeck_process.cpp
     ${PROJECT_SOURCE_DIR}/cpp/full_code/ou_vorticity_equation.cpp
-    ${PROJECT_SOURCE_DIR}/cpp/full_code/test_interpolation_methods.cpp)
+    ${PROJECT_SOURCE_DIR}/cpp/full_code/test_interpolation_methods.cpp
+    ${PROJECT_SOURCE_DIR}/cpp/full_code/test_particle_integration.cpp)
 
 set(hpp_for_lib
     ${PROJECT_SOURCE_DIR}/cpp/full_code/code_base.hpp
@@ -474,7 +475,8 @@ set(hpp_for_lib
     ${PROJECT_SOURCE_DIR}/cpp/spectrum_function.hpp
     ${PROJECT_SOURCE_DIR}/cpp/full_code/ornstein_uhlenbeck_process.hpp
     ${PROJECT_SOURCE_DIR}/cpp/full_code/ou_vorticity_equation.hpp
-    ${PROJECT_SOURCE_DIR}/cpp/full_code/test_interpolation_methods.hpp)
+    ${PROJECT_SOURCE_DIR}/cpp/full_code/test_interpolation_methods.hpp
+    ${PROJECT_SOURCE_DIR}/cpp/full_code/test_particle_integration.hpp)
 
 if(GSL_FOUND)
     LIST(APPEND cpp_for_lib ${PROJECT_SOURCE_DIR}/cpp/particles/rhs/deformation_tensor_rhs.cpp)
diff --git a/TurTLE/TEST.py b/TurTLE/TEST.py
index d173514faa36999e04a8cececc50e58aad194460..6914045e5278d7c113ed98ac855642d40d41d9af 100644
--- a/TurTLE/TEST.py
+++ b/TurTLE/TEST.py
@@ -133,6 +133,11 @@ class TEST(_code):
         if dns_type == 'test_interpolation_methods':
             pars['nxparticles'] = 160
             pars['nzparticles'] = 180
+        if dns_type == 'test_particle_integration':
+            pars['nxparticles'] = 13
+            pars['nzparticles'] = 11
+            pars['dt'] = 0.125
+            pars['niter_todo'] = 128
         if dns_type == 'phase_shift_test':
             pars['random_phase_seed'] = 1
         if dns_type in ['dealias_test', 'Gauss_field_test']:
@@ -344,6 +349,9 @@ class TEST(_code):
         parser_test_interpolation_methods = subparsers.add_parser(
                 'test_interpolation_methods',
                 help = 'test interpolation methods')
+        parser_test_particle_integration = subparsers.add_parser(
+                'test_particle_integration',
+                help = 'test particle integration')
         parser_ornstein_uhlenbeck_test = subparsers.add_parser(
                 'ornstein_uhlenbeck_test',
                 help = 'test ornstein uhlenbeck process')
@@ -361,6 +369,7 @@ class TEST(_code):
                 'field_output_test',
                 'test_interpolation',
                 'test_interpolation_methods',
+                'test_particle_integration',
                 'ornstein_uhlenbeck_test',
                 'test_tracer_set']:
             eval('self.simulation_parser_arguments(parser_' + parser + ')')
@@ -416,6 +425,29 @@ class TEST(_code):
                              (5, 2)]:
                     ofile.create_group('particle/phi_{0}{1}'.format(n, m))
                 ofile.close()
+            if self.dns_type == 'test_particle_integration':
+                ofile = h5py.File(os.path.join(self.work_dir, self.simname + '_particles.h5'), 'w')
+                ofile.create_group('particle')
+                ofile.create_group('particle/position')
+                for n, m in [(0, 0),
+                             (1, 1),
+                             (2, 1),
+                             (2, 2),
+                             (3, 1),
+                             (3, 2),
+                             (4, 1),
+                             (4, 2),
+                             (5, 1),
+                             (5, 2)]:
+                    for integration_order in [1, 2, 4]:
+                        for factor in [1, 2, 4, 8]:
+                            ofile.create_group(
+                                    'particle_o{0}_n{1}_m{2}_f{3}/position'.format(
+                                        integration_order, n, m, factor))
+                            ofile.create_group(
+                                    'particle_o{0}_n{1}_m{2}_f{3}/velocity'.format(
+                                        integration_order, n, m, factor))
+                ofile.close()
             if self.dns_type == 'test_interpolation':
                 if type(particle_initial_condition) == type(None):
                     pbase_shape = (self.parameters['nparticles'],)
diff --git a/TurTLE/test/test_particle_integration.py b/TurTLE/test/test_particle_integration.py
new file mode 100644
index 0000000000000000000000000000000000000000..026de7dd900e2591adb14af55686b7c3e6a67d4d
--- /dev/null
+++ b/TurTLE/test/test_particle_integration.py
@@ -0,0 +1,115 @@
+import os
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+
+from TurTLE import TEST
+
+def main():
+    c = TEST()
+    if not os.path.exists('test.h5'):
+        c.launch([
+            'test_particle_integration',
+            '--np', '4',
+            '--ntpp', '3'])
+
+    data_file = h5py.File(c.simname + '_particles.h5', 'r')
+
+    vv = data_file['particle_o2_n1_m1_f1/velocity/0'][()]
+    print(np.min(vv), np.max(vv))
+
+
+    f = plt.figure(figsize = (9, 9))
+    a = f.add_subplot(111)
+
+    for ff in [1, 2, 4, 8]:
+        gg = data_file['particle_o2_n2_m1_f{0}/position'.format(ff)]
+        xx = read_trajectory(gg, 5, 7)
+        a.plot(xx[:, 0], xx[:, 2], marker = '.')
+    f.tight_layout()
+    f.savefig('test_particle_integration_trajectory.pdf')
+    plt.close(f)
+
+    f = plt.figure(figsize = (9, 9))
+    a = f.add_subplot(111)
+    dt0 = c.get_data_file()['parameters/dt'][()]
+    dtlist = [dt0, dt0/2, dt0/4]
+    for n, m in [(0, 0),
+                 (1, 1),
+                 (2, 1),
+                 (3, 2)]:
+        for oo in [1, 2, 4]:
+            err_list = []
+            for factor in [1, 2, 4]:
+                xx1 = data_file['particle_o{0}_n{1}_m{2}_f{3}/position/{4}'.format(oo, n, m, factor, factor*8)][()]
+                xx2 = data_file['particle_o{0}_n{1}_m{2}_f{3}/position/{4}'.format(oo, n, m, factor*2, factor*16)][()]
+                err_list.append(np.mean(np.abs(xx2 - xx1)))
+            a.plot(dtlist,
+                   err_list,
+                   label = 'o{0}_n{1}_m{2}'.format(oo, n, m),
+                   marker = '.')
+    a.plot(dtlist,
+           np.array(dtlist)**2 * (1e-2),
+           color = (0, 0, 0),
+           dashes = (2, 2))
+    a.legend(loc = 'best')
+    a.set_xscale('log')
+    a.set_yscale('log')
+    f.tight_layout()
+    f.savefig('test_particle_integration_evdt.pdf')
+    plt.close(f)
+
+    #f = plt.figure(figsize = (6, 6))
+    #a = f.add_subplot(111)
+    #err_mean_1 = []
+    #err_max_1 = []
+    #n = 1
+    #phi0 = data_file['particle/phi_{0}{1}/0'.format(n  , 1)][()]
+    #phi1 = data_file['particle/phi_{0}{1}/0'.format(n+1, 1)][()]
+    #err = np.abs(phi0 - phi1)
+    #err_mean_1.append(err.mean())
+    #err_max_1.append(err.max())
+    #err_mean_2 = []
+    #err_max_2 = []
+    #for n in [2, 3, 4]:
+    #    phi0 = data_file['particle/phi_{0}{1}/0'.format(n  , 1)][()]
+    #    phi1 = data_file['particle/phi_{0}{1}/0'.format(n+1, 1)][()]
+    #    err = np.abs(phi0 - phi1)
+    #    err_mean_1.append(err.mean())
+    #    err_max_1.append(err.max())
+    #    phi0 = data_file['particle/phi_{0}{1}/0'.format(n  , 2)][()]
+    #    phi1 = data_file['particle/phi_{0}{1}/0'.format(n+1, 2)][()]
+    #    err = np.abs(phi0 - phi1)
+    #    err_mean_2.append(err.mean())
+    #    err_max_2.append(err.max())
+    #neighbour_list = np.array([1, 2, 3, 4]).astype(np.float)
+    #a.plot(neighbour_list, err_mean_1, marker = '.', label = 'm=1, mean')
+    #a.plot(neighbour_list, err_max_1, marker = '.', label = 'm=1, max')
+    #a.plot(neighbour_list, 1e-4*neighbour_list**(-4), color = 'black', dashes = (2, 2))
+    #neighbour_list = np.array([2, 3, 4]).astype(np.float)
+    #a.plot(neighbour_list, err_mean_2, marker = '.', label = 'm=2, mean')
+    #a.plot(neighbour_list, err_max_2, marker = '.', label = 'm=2, max')
+
+    #a.set_xscale('log')
+    #a.set_yscale('log')
+    #a.legend(loc = 'best')
+
+    #f.tight_layout()
+    #f.savefig('test_interpolation_methods_err.pdf')
+    #plt.close(f)
+
+    #data_file.close()
+    return None
+
+def read_trajectory(group, ix, iz):
+    iter_names = group.keys()
+    iterations = np.sort([int(ii) for ii in iter_names])
+
+    xx = []
+    for ii in iterations:
+        xx.append(group['{0}'.format(ii)][iz, ix])
+    return np.array(xx)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/cpp/full_code/test_particle_integration.cpp b/cpp/full_code/test_particle_integration.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..92d5eff4582b20f6aa9949606b8924d705767002
--- /dev/null
+++ b/cpp/full_code/test_particle_integration.cpp
@@ -0,0 +1,263 @@
+/******************************************************************************
+*                                                                             *
+*  Copyright 2022 Max Planck Computing and Data Facility                      *
+*                                                                             *
+*  This file is part of TurTLE.                                               *
+*                                                                             *
+*  TurTLE is free software: you can redistribute it and/or modify             *
+*  it under the terms of the GNU General Public License as published          *
+*  by the Free Software Foundation, either version 3 of the License,          *
+*  or (at your option) any later version.                                     *
+*                                                                             *
+*  TurTLE is distributed in the hope that it will be useful,                  *
+*  but WITHOUT ANY WARRANTY; without even the implied warranty of             *
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               *
+*  GNU General Public License for more details.                               *
+*                                                                             *
+*  You should have received a copy of the GNU General Public License          *
+*  along with TurTLE. If not, see <http://www.gnu.org/licenses/>              *
+*                                                                             *
+*  Contact: Cristian.Lalescu@mpcdf.mpg.de                                     *
+*                                                                             *
+******************************************************************************/
+
+
+
+#include "test_particle_integration.hpp"
+#include "particles/particles_input_grid.hpp"
+#include "particles/rhs/tracer_rhs.hpp"
+
+template <typename rnumber>
+int test_particle_integration<rnumber>::initialize()
+{
+    TIMEZONE("test_particle_integration::initialize");
+    this->read_parameters();
+
+    this->velocity_back = new field<rnumber, FFTW, THREE>(
+            nx, ny, nz,
+            this->comm,
+            FFTW_ESTIMATE);
+
+    this->kk = new kspace<FFTW, SMOOTH>(
+            this->velocity_back->clayout,
+            this->dkx, this->dky, this->dkz);
+
+    this->velocity_front = new field_tinterpolator<rnumber, FFTW, THREE, NONE>();
+    this->velocity_front->set_field(this->velocity_back);
+
+    if (this->myrank == 0)
+    {
+        hid_t stat_file = H5Fopen(
+                (this->simname + std::string(".h5")).c_str(),
+                H5F_ACC_RDWR,
+                H5P_DEFAULT);
+        this->kk->store(stat_file);
+        H5Fclose(stat_file);
+    }
+
+    return EXIT_SUCCESS;
+}
+
+
+template <typename rnumber>
+template <int neighbours, int smoothness>
+int test_particle_integration<rnumber>::integrate(
+        const int order,
+        const int factor,
+        const int total_iterations)
+{
+    TIMEZONE("test_particle_integration::integrate");
+    DEBUG_MSG("entered integrate<%d, %d>(order=%d, factor=%d, iterations=%d)\n",
+            neighbours,
+            smoothness,
+            order,
+            factor,
+            total_iterations);
+    // create a particle object
+    particle_set<3, neighbours, smoothness> pset(
+            this->velocity_back->rlayout,
+            this->kk->dkx,
+            this->kk->dky,
+            this->kk->dkz);
+
+    // initialize particles
+    particles_input_grid<long long int, double, 3> pinput(
+            this->comm,
+            this->nxparticles,
+            this->nzparticles,
+            pset.getSpatialLowLimitZ(),
+            pset.getSpatialUpLimitZ());
+
+    pset.init(pinput);
+
+    tracer_rhs<rnumber, FFTW, NONE> rhs;
+    rhs.setVelocity(this->velocity_front);
+
+    particle_solver psolver(pset, 0);
+
+    const std::string species_name = (
+            "particle_o" + std::to_string(order) +
+            "_n" + std::to_string(neighbours) +
+            "_m" + std::to_string(smoothness) +
+            "_f" + std::to_string(factor));
+
+    for (int tt = 0; tt < total_iterations*factor; tt++)
+    {
+        // output
+        if (tt % 4 == 0)
+        {
+            pset.writeStateTriplet(
+                    0,
+                    this->particles_sample_writer_mpi,
+                    species_name,
+                    "position",
+                    tt);
+            pset.writeSample(
+                    this->velocity_back,
+                    this->particles_sample_writer_mpi,
+                    species_name,
+                    "velocity",
+                    tt);
+        }
+        // time step
+        switch(order)
+        {
+            case 1:
+                psolver.Euler(this->dt / factor, rhs);
+                break;
+            case 2:
+                psolver.Heun(this->dt / factor, rhs);
+                break;
+            case 4:
+                psolver.cRK4(this->dt / factor, rhs);
+                break;
+            default:
+                assert(false);
+                break;
+        }
+    }
+    // output final state
+    {
+        int tt = total_iterations*factor;
+        pset.writeStateTriplet(
+                0,
+                this->particles_sample_writer_mpi,
+                species_name,
+                "position",
+                tt);
+        pset.writeSample(
+                this->velocity_back,
+                this->particles_sample_writer_mpi,
+                species_name,
+                "velocity",
+                tt);
+    }
+
+    return EXIT_SUCCESS;
+}
+
+
+template <typename rnumber>
+int test_particle_integration<rnumber>::do_work()
+{
+    TIMEZONE("test_particle_integration::do_work");
+
+    // create field
+    make_gaussian_random_field(
+            this->kk,
+            this->velocity_back,
+            this->random_seed,
+            0.4,                    // dissipation
+            1.,                     // Lint
+            4*acos(0) / this->nx,   // etaK
+            6.78,                   // default
+            0.40,                   // default
+            3*3./2);                // 3/2 to account for divfree call below
+                                    // 3 because I want a larger amplitude
+    this->kk->force_divfree<rnumber>(this->velocity_back->get_cdata());
+
+    // take field to real space
+    this->velocity_back->ift();
+
+    // create a particle object
+    particle_set<3, 1, 1> pset(
+            this->velocity_back->rlayout,
+            this->kk->dkx,
+            this->kk->dky,
+            this->kk->dkz);
+    particles_input_grid<long long int, double, 3> pinput(
+            this->comm,
+            this->nxparticles,
+            this->nzparticles,
+            pset.getSpatialLowLimitZ(),
+            pset.getSpatialUpLimitZ());
+
+    pset.init(pinput);
+
+    // initialize writer
+    this->particles_sample_writer_mpi = new particles_output_sampling_hdf5<
+        long long int, double, 3>(
+                this->comm,
+                pset.getTotalNumberOfParticles(),
+                (this->simname + "_particles.h5"),
+                "particle",
+                "position/0");
+    this->particles_sample_writer_mpi->setParticleFileLayout(pset.getParticleFileLayout());
+
+    // test integration
+    for (int oo = 1; oo < 5; oo *= 2)
+    for (int ff = 1; ff < 9; ff *= 2)
+    {
+        this->template integrate<0, 0>(oo, ff, this->niter_todo);
+        this->template integrate<1, 1>(oo, ff, this->niter_todo);
+        this->template integrate<2, 1>(oo, ff, this->niter_todo);
+        this->template integrate<3, 1>(oo, ff, this->niter_todo);
+        this->template integrate<4, 1>(oo, ff, this->niter_todo);
+        this->template integrate<5, 1>(oo, ff, this->niter_todo);
+        this->template integrate<2, 2>(oo, ff, this->niter_todo);
+        this->template integrate<3, 2>(oo, ff, this->niter_todo);
+        this->template integrate<4, 2>(oo, ff, this->niter_todo);
+        this->template integrate<5, 2>(oo, ff, this->niter_todo);
+    }
+
+    delete this->particles_sample_writer_mpi;
+
+    return EXIT_SUCCESS;
+}
+
+
+template <typename rnumber>
+int test_particle_integration<rnumber>::finalize()
+{
+    TIMEZONE("test_particle_integration::finalize");
+    delete this->velocity_front;
+    delete this->velocity_back;
+    delete this->kk;
+    return EXIT_SUCCESS;
+}
+
+
+template <typename rnumber>
+int test_particle_integration<rnumber>::read_parameters()
+{
+    TIMEZONE("test_particle_integration::read_parameters");
+    this->test::read_parameters();
+
+    hid_t parameter_file = H5Fopen(
+            (this->simname + std::string(".h5")).c_str(),
+            H5F_ACC_RDONLY,
+            H5P_DEFAULT);
+    this->nxparticles = hdf5_tools::read_value<int>(parameter_file, "/parameters/nxparticles");
+    this->nzparticles = hdf5_tools::read_value<int>(parameter_file, "/parameters/nzparticles");
+    this->dt = hdf5_tools::read_value<double>(parameter_file, "/parameters/dt");
+    this->niter_todo = hdf5_tools::read_value<int>(parameter_file, "/parameters/niter_todo");
+    this->random_seed = hdf5_tools::read_value<int>(parameter_file, "/parameters/field_random_seed");
+    H5Fclose(parameter_file);
+    MPI_Barrier(this->comm);
+    return EXIT_SUCCESS;
+}
+
+
+template class test_particle_integration<float>;
+template class test_particle_integration<double>;
+
diff --git a/cpp/full_code/test_particle_integration.hpp b/cpp/full_code/test_particle_integration.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c303e080d21b04600720bd43207214af24a14a4a
--- /dev/null
+++ b/cpp/full_code/test_particle_integration.hpp
@@ -0,0 +1,89 @@
+/******************************************************************************
+*                                                                             *
+*  Copyright 2022 Max Planck Computing and Data Facility                      *
+*                                                                             *
+*  This file is part of TurTLE.                                               *
+*                                                                             *
+*  TurTLE is free software: you can redistribute it and/or modify             *
+*  it under the terms of the GNU General Public License as published          *
+*  by the Free Software Foundation, either version 3 of the License,          *
+*  or (at your option) any later version.                                     *
+*                                                                             *
+*  TurTLE is distributed in the hope that it will be useful,                  *
+*  but WITHOUT ANY WARRANTY; without even the implied warranty of             *
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               *
+*  GNU General Public License for more details.                               *
+*                                                                             *
+*  You should have received a copy of the GNU General Public License          *
+*  along with TurTLE. If not, see <http://www.gnu.org/licenses/>              *
+*                                                                             *
+*  Contact: Cristian.Lalescu@mpcdf.mpg.de                                     *
+*                                                                             *
+******************************************************************************/
+
+
+
+#ifndef TEST_PARTICLE_INTEGRATION_HPP
+#define TEST_PARTICLE_INTEGRATION_HPP
+
+
+
+#include <cstdlib>
+#include "base.hpp"
+#include "kspace.hpp"
+#include "full_code/test.hpp"
+#include "particles/interpolation/field_tinterpolator.hpp"
+#include "particles/interpolation/particle_set.hpp"
+#include "particles/particle_solver.hpp"
+
+/** \brief Test of particle time-integration methods.
+ *
+ */
+
+template <typename rnumber>
+class test_particle_integration: public test
+{
+    public:
+        int nxparticles;
+        int nzparticles;
+        double dt;
+        int niter_todo;
+        int random_seed;
+
+
+        field_tinterpolator<rnumber, FFTW, THREE, NONE> *velocity_front;
+
+        particles_output_sampling_hdf5<long long int, double, 3> *particles_sample_writer_mpi;
+
+        field<rnumber, FFTW, THREE> *velocity_back;
+
+        kspace<FFTW, SMOOTH> *kk;
+
+        test_particle_integration(
+                const MPI_Comm COMMUNICATOR,
+                const std::string &simulation_name):
+            test(
+                    COMMUNICATOR,
+                    simulation_name),
+            random_seed(0),
+            velocity_front(nullptr),
+            particles_sample_writer_mpi(nullptr),
+            velocity_back(nullptr),
+            kk(nullptr){}
+        ~test_particle_integration(){}
+
+        int initialize(void);
+        int do_work(void);
+        int finalize(void);
+
+        int read_parameters(void);
+
+        template <int neighbours, int smoothness>
+        int integrate(
+                const int order,
+                const int factor,
+                const int total_iterations);
+};
+
+#endif//TEST_PARTICLE_INTEGRATION_HPP
+
diff --git a/cpp/particles/particle_solver.cpp b/cpp/particles/particle_solver.cpp
index 453902681288ab431bc7609628f225ce1e4a8f22..f53b24bdf731f70dd7437966baecac1571583f53 100644
--- a/cpp/particles/particle_solver.cpp
+++ b/cpp/particles/particle_solver.cpp
@@ -163,6 +163,147 @@ int particle_solver::Heun(
     return EXIT_SUCCESS;
 }
 
+/** \brief Implementation of the classic 4th order Runge-Kutta integration method
+ *
+ * This is a fourth order Runge-Kutta time-stepping algorithm.
+ * We integrate the following first order ODE:
+ * \f[
+ *     x'(t) = f(t, x(t)).
+ * \f]
+ * The initial condition is \f$ x_n \f$ (for time \f$ t_n \f$), and we would
+ * like to integrate the equation forward for an interval \f$ h \f$.
+ * Then we must apply the following formulas:
+ * \f{eqnarray*}{
+ *     k_1 &=& f(t_n, x_n) \\
+ *     k_2 &=& f(t_n+\frac{h}{2}, x_n + \frac{h}{2} k_1) \\
+ *     k_3 &=& f(t_n+\frac{h}{2}, x_n + \frac{h}{2} k_2) \\
+ *     k_4 &=& f(t_n+h, x_n + h k_3) \\
+ *     x_{n+1} &=& x_n + \frac{h}{6}(k_1 + 2 k_2 + 2 k_3 + k_4)
+ * \f}
+ *
+ */
+int particle_solver::cRK4(
+        double dt,
+        abstract_particle_rhs &rhs)
+{
+    TIMEZONE("particle_solver::cRK4");
+    assert(this->pset->getStateSize() == rhs.getStateSize());
+    // temporary arrays for storing the initial state and the four right-hand-side stages.
+    // We need to be able to redistribute the initial condition for computations to make sense,
+    // so we must put everything in the same vector.
+    std::vector<std::unique_ptr<particle_rnumber[]>> extra_values;
+    std::unique_ptr<particle_rnumber[]> xx, kcurrent;
+
+    // allocate extra_values
+    // I use the following convention:
+    // extra_values[0] = x0
+    // extra_values[1] = k1
+    // extra_values[2] = k2
+    // extra_values[3] = k3
+    // extra_values[4] = k4
+    extra_values.resize(5);
+    for (int temp_counter = 0; temp_counter < 5; temp_counter++)
+    {
+        extra_values[temp_counter].reset(new particle_rnumber[
+                this->pset->getLocalNumberOfParticles()*
+                this->pset->getStateSize()]);
+    }
+
+    // store initial condition
+    this->pset->getParticleState(extra_values[0].get());
+
+    /*** COMPUTE K1 ***/
+    kcurrent.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    // call to rhs will possibly change local ordering, but local number of particles remains the same
+    rhs(0, *(this->pset), kcurrent.get(), extra_values);
+    // store k1 values
+    this->pset->copy_state_tofrom(extra_values[1], kcurrent);
+
+    /*** COMPUTE xn+h*k1/2 ***/
+    // allocate xx
+    xx.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    this->pset->LOOP(
+            [&](const partsize_t idx){
+                xx[idx] = extra_values[0][idx] + 0.5*dt*extra_values[1][idx];
+            });
+    // copy the temporary state to the particle set, and redistribute
+    // we MUST redistribute, because we want to interpolate.
+    // we MUST shuffle values according to the same redistribution, otherwise
+    // the subsequent computations are meaningless.
+    // this will possibly change local ordering AND/OR local number of particles
+    this->pset->setParticleState(xx.get());
+    this->pset->redistribute(extra_values);
+
+    /*** COMPUTE K2 ***/
+    kcurrent.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    rhs(0.5, *(this->pset), kcurrent.get(), extra_values);
+    this->pset->copy_state_tofrom(extra_values[2], kcurrent);
+
+    /*** COMPUTE xn+h*k2/2 ***/
+    xx.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    this->pset->LOOP(
+            [&](const partsize_t idx){
+                xx[idx] = extra_values[0][idx] + 0.5*dt*extra_values[2][idx];
+            });
+    this->pset->setParticleState(xx.get());
+    this->pset->redistribute(extra_values);
+
+    /*** COMPUTE K3 ***/
+    kcurrent.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    rhs(0.5, *(this->pset), kcurrent.get(), extra_values);
+    this->pset->copy_state_tofrom(extra_values[3], kcurrent);
+
+    /*** COMPUTE xn+h*k3 ***/
+    xx.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    this->pset->LOOP(
+            [&](const partsize_t idx){
+                xx[idx] = extra_values[0][idx] + dt*extra_values[3][idx];
+            });
+    this->pset->setParticleState(xx.get());
+    this->pset->redistribute(extra_values);
+
+    /*** COMPUTE K4 ***/
+    // k4 is evaluated at the end of the interval, t_n + h
+    kcurrent.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    rhs(1, *(this->pset), kcurrent.get(), extra_values);
+    this->pset->copy_state_tofrom(extra_values[4], kcurrent);
+
+    /*** COMPUTE xn+h*(k1 + 2*k2 + 2*k3 + k4)/6 ***/
+    xx.reset(new particle_rnumber[
+            this->pset->getLocalNumberOfParticles()*
+            this->pset->getStateSize()]);
+    this->pset->LOOP(
+            [&](const partsize_t idx){
+                xx[idx] = extra_values[0][idx] +
+                        dt*(extra_values[1][idx] +
+                        2*extra_values[2][idx] +
+                        2*extra_values[3][idx] +
+                        extra_values[4][idx]) / 6;
+            });
+    this->pset->setParticleState(xx.get());
+    extra_values.resize(0);
+    this->pset->redistribute(extra_values);
+
+    // impose model constraints
+    rhs.imposeModelConstraints(*(this->pset));
+
+    // and we are done
+    return EXIT_SUCCESS;
+}
+
 template <int state_size>
 int particle_solver::writeCheckpoint(
diff --git a/cpp/particles/particle_solver.hpp b/cpp/particles/particle_solver.hpp
index e389add17a8d01e1102795f1255fbb6baada4fc9..004581c8961364714fdbfd775ad683be43748117 100644
--- a/cpp/particles/particle_solver.hpp
+++ b/cpp/particles/particle_solver.hpp
@@ -109,6 +109,10 @@ class particle_solver
         int Heun(
                 double dt,
                 abstract_particle_rhs &rhs);
+
+        int cRK4(
+                double dt,
+                abstract_particle_rhs &rhs);
 };
 
 #endif//PARTICLE_SOLVER_HPP
diff --git a/cpp/particles/particles_distr_mpi.hpp b/cpp/particles/particles_distr_mpi.hpp
index b7d0f9f8da5c903b4f358cd3ca261f44b7325c00..7a4d646c2e663aa31a972a8c488e9c6aca6f6e18 100644
--- a/cpp/particles/particles_distr_mpi.hpp
+++ b/cpp/particles/particles_distr_mpi.hpp
@@ -283,10 +283,10 @@ public:
             if(descriptor.nbParticlesToSend){
                 whatNext.emplace_back(std::pair<Action,int>{NOTHING_TODO, -1});
                 mpiRequests.emplace_back();
-                DEBUG_MSG("descriptor.nparticlestosend = %d, size_particle_positions = %d, std::numeric_limits = %d\n",
-                        descriptor.nbParticlesToSend,
-                        size_particle_positions,
-                        std::numeric_limits<int>::max());
+                //DEBUG_MSG("descriptor.nparticlestosend = %d, size_particle_positions = %d, std::numeric_limits = %d\n",
+                //        descriptor.nbParticlesToSend,
+                //        size_particle_positions,
+                //        std::numeric_limits<int>::max());
                 assert(descriptor.nbParticlesToSend*size_particle_positions < std::numeric_limits<int>::max());
                 AssertMpi(MPI_Isend(const_cast<real_number*>(&particles_positions[0]), int(descriptor.nbParticlesToSend*size_particle_positions), particles_utils::GetMpiType(real_number()), descriptor.destProc, TAG_SHIFT_OFFSET*counter_shift_tags + TAG_LOW_UP_PARTICLES,
                     current_com, &mpiRequests.back()));
@@ -565,7 +565,7 @@ public:
             return;
         }
 
-        {// TODO remove
+        {// this fails when particles move more than one cell during a single time-step
            partsize_t partOffset = 0;
            for(int idxPartition = 0 ; idxPartition < current_partition_size ; ++idxPartition){
                for(partsize_t idx = 0 ; idx < current_my_nb_particles_per_partition[idxPartition] ; ++idx){
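
Note on the scheme itself: the update implemented by particle_solver::cRK4 is the textbook RK4 step documented in its Doxygen comment (stages k1..k4 and the weighted average). For reference, the same scheme can be exercised outside of TurTLE on a scalar ODE; the sketch below is independent of the TurTLE API, and the helper names (rk4_step, integrate) are illustrative only. Halving the step size should shrink the error by roughly 2**4, which is the same refinement behaviour the new test probes by rerunning with factor = 1, 2, 4, 8.

# Standalone sketch (not TurTLE code): the classic RK4 update from the cRK4
# docstring, applied to the toy ODE x'(t) = cos(t), whose exact solution is sin(t).
import numpy as np

def rk4_step(f, t, x, h):
    # the four stages and the weighted update, exactly as documented above
    k1 = f(t, x)
    k2 = f(t + 0.5*h, x + 0.5*h*k1)
    k3 = f(t + 0.5*h, x + 0.5*h*k2)
    k4 = f(t + h, x + h*k3)
    return x + (h/6)*(k1 + 2*k2 + 2*k3 + k4)

def integrate(f, x0, t_final, nsteps):
    x, t = x0, 0.0
    h = t_final / nsteps
    for i in range(nsteps):
        x = rk4_step(f, t, x, h)
        t += h
    return x

if __name__ == '__main__':
    f = lambda t, x: np.cos(t)
    errors = []
    for nsteps in [16, 32, 64, 128]:
        errors.append(abs(integrate(f, 0.0, 1.0, nsteps) - np.sin(1.0)))
    # consecutive error ratios should approach 2**4 = 16 for a 4th-order scheme
    print([errors[i]/errors[i+1] for i in range(len(errors) - 1)])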
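
The error-versus-dt figure produced by TurTLE/test/test_particle_integration.py can also be turned into a numerical estimate of the convergence order by fitting the slope of log(err) against log(dt). A minimal sketch, assuming dtlist and err_list are filled as in that script (the synthetic data at the bottom is only there to make the snippet self-contained):

# Sketch only: estimate the order p in err ~ C * dt**p from (dt, err) pairs.
import numpy as np

def convergence_order(dtlist, err_list):
    # least-squares slope in log-log coordinates
    slope, intercept = np.polyfit(np.log(dtlist), np.log(err_list), 1)
    return slope

dtlist = np.array([0.125, 0.0625, 0.03125])
err_list = 1e-2 * dtlist**2
print(convergence_order(dtlist, err_list))  # approximately 2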