diff --git a/CMakeLists.txt b/CMakeLists.txt index 9a16ec039f9b6b3569b4c693296bcd3e6c0ba037..fb1e3bc323961b494951fc8e785db732eb7f7ff7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -383,6 +383,12 @@ if (BUILD_TESTING) NAME test_documentation COMMAND rst-lint README.rst WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) + ### test copying of parameters + add_test( + NAME test_parameter_copy + COMMAND turtle.test_parameter_copy + WORKING_DIRECTORY ${TEST_OUTPUT_DIRECTORY}) + set_tests_properties(test_parameter_copy PROPERTIES TIMEOUT 100) ### basic functionality add_test( NAME test_shared_array @@ -401,6 +407,11 @@ if (BUILD_TESTING) COMMAND turtle.test_statistics WORKING_DIRECTORY ${TEST_OUTPUT_DIRECTORY}) ### basic particle functionality + add_test( + NAME test_particle_set_init + COMMAND turtle.test_particle_set_init + WORKING_DIRECTORY ${TEST_OUTPUT_DIRECTORY}) + set_tests_properties(test_particle_set_init PROPERTIES TIMEOUT 200) add_test( NAME test_particles COMMAND turtle.test_particles p2p_sampling on @@ -415,12 +426,6 @@ if (BUILD_TESTING) NAME test_tracer_set COMMAND turtle TEST test_tracer_set -n 32 --np 2 --ntpp 2 --simname tracer_set_testsim WORKING_DIRECTORY ${TEST_OUTPUT_DIRECTORY}) - ### test copying of parameters - add_test( - NAME test_parameter_copy - COMMAND turtle.test_parameter_copy - WORKING_DIRECTORY ${TEST_OUTPUT_DIRECTORY}) - set_tests_properties(test_parameter_copy PROPERTIES TIMEOUT 100) ### compare DNS output to stored results add_test( NAME test_NSVEparticles diff --git a/TurTLE/test/particle_set/test_particle_set_init.cpp b/TurTLE/test/particle_set/test_particle_set_init.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3ef6fc26f54d6dc820c5721de75c0f92878df624 --- /dev/null +++ b/TurTLE/test/particle_set/test_particle_set_init.cpp @@ -0,0 +1,110 @@ +/****************************************************************************** +* * +* Copyright 2022 Max Planck Computing and Data Facility * +* * +* This file is part of TurTLE. * +* * +* TurTLE is free software: you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published * +* by the Free Software Foundation, either version 3 of the License, * +* or (at your option) any later version. * +* * +* TurTLE is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. * +* * +* You should have received a copy of the GNU General Public License * +* along with TurTLE. 
If not, see <http://www.gnu.org/licenses/> * +* * +* Contact: Cristian.Lalescu@mpcdf.mpg.de * +* * +******************************************************************************/ + + + +//#include "test_particle_set_init.hpp" +#include "particles/interpolation/particle_set.hpp" +#include "particles/particle_set_input_random.hpp" +#include "particles/particle_set_input_hdf5.hpp" + +template <typename rnumber> +int test_particle_set_init<rnumber>::initialize(void) +{ + TIMEZONE("test_particle_set_init::initialize"); + this->read_parameters(); + return EXIT_SUCCESS; +} + +template <typename rnumber> +int test_particle_set_init<rnumber>::do_work(void) +{ + TIMEZONE("test_particle_set_init::do_work"); + const double twopi = 4*acos(0); + field<rnumber, FFTW, THREE> vel(this->nx, this->ny, this->nz, this->comm, FFTW_ESTIMATE); + + // generate particle set + particle_set<3, 2, 1> pset0(vel.rlayout, this->dkx, this->dky, this->dkz); + particle_set_input_random<long long int, double, 3> pir(this->comm, 10, 1, 0, twopi); + pset0.init(pir); + + long long int *tmp_label = new long long int[pset0.getLocalNumberOfParticles()]; + std::copy(pset0.getParticleLabels(), + pset0.getParticleLabels()+pset0.getLocalNumberOfParticles(), + tmp_label); + tmp_label[2] = 4; + tmp_label[3] = 4; + + pset0.setParticleLabels(tmp_label); + pset0.writeParticleLabels( + this->simname + std::string("_particles.h5"), + "tracers0", + "label", + 0); + pset0.writeParticleStates<3>( + this->simname + std::string("_particles.h5"), + "tracers0", + "state", + 0); + + particle_set<3, 1, 1> pset1(vel.rlayout, this->dkx, this->dky, this->dkz); + particle_set_input_hdf5<long long int, double, 3> pif( + this->comm, + this->simname + std::string("_particles.h5"), + "tracers0", + 0, + 0, + twopi); + pset1.init(pif); + pset1.writeParticleLabels( + this->simname + std::string("_particles.h5"), + "tracers1", + "label", + 0); + pset1.writeParticleStates<3>( + this->simname + std::string("_particles.h5"), + "tracers1", + "state", + 0); + + + return EXIT_SUCCESS; +} + +template <typename rnumber> +int test_particle_set_init<rnumber>::finalize(void) +{ + TIMEZONE("test_particle_set_init::finalize"); + return EXIT_SUCCESS; +} + +template <typename rnumber> +int test_particle_set_init<rnumber>::read_parameters(void) +{ + TIMEZONE("test_particle_set_init::read_parameters"); + this->test::read_parameters(); + return EXIT_SUCCESS; +} + +template class test_particle_set_init<float>; +template class test_particle_set_init<double>; diff --git a/TurTLE/test/particle_set/test_particle_set_init.hpp b/TurTLE/test/particle_set/test_particle_set_init.hpp new file mode 100644 index 0000000000000000000000000000000000000000..966cdad64b950fd6a9cfa1448ceaca056fbf0765 --- /dev/null +++ b/TurTLE/test/particle_set/test_particle_set_init.hpp @@ -0,0 +1,51 @@ +/****************************************************************************** +* * +* Copyright 2022 Max Planck Computing and Data Facility * +* * +* This file is part of TurTLE. * +* * +* TurTLE is free software: you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published * +* by the Free Software Foundation, either version 3 of the License, * +* or (at your option) any later version. * +* * +* TurTLE is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. 
* +* * +* You should have received a copy of the GNU General Public License * +* along with TurTLE. If not, see <http://www.gnu.org/licenses/> * +* * +* Contact: Cristian.Lalescu@mpcdf.mpg.de * +* * +******************************************************************************/ + + + +#ifndef TEST_PARTICLE_SET_INIT_HPP +#define TEST_PARTICLE_SET_INIT_HPP + + + +#include "full_code/test.hpp" + +template <typename rnumber> +class test_particle_set_init: public test +{ + public: + test_particle_set_init( + const MPI_Comm COMMUNICATOR, + const std::string &simulation_name): + test( + COMMUNICATOR, + simulation_name){} + ~test_particle_set_init(){} + + int initialize(void); + int do_work(void); + int finalize(void); + int read_parameters(void); +}; + +#endif//TEST_PARTICLE_SET_INIT_HPP diff --git a/TurTLE/test/test_particle_set_init.py b/TurTLE/test/test_particle_set_init.py new file mode 100644 index 0000000000000000000000000000000000000000..5af878645515bdbc4bf84191d7384f67871cea86 --- /dev/null +++ b/TurTLE/test/test_particle_set_init.py @@ -0,0 +1,100 @@ +import os +import sys +import numpy as np +import h5py +import matplotlib.pyplot as plt + +import TurTLE +from TurTLE import TEST + +cpp_location = os.path.join(TurTLE.data_dir, 'particle_set') + +class aTEST(TEST): + def write_src( + self): + self.version_message = ( + '/***********************************************************************\n' + + '* this code automatically generated by TurTLE\n' + + '* version {0}\n'.format(TurTLE.__version__) + + '***********************************************************************/\n\n\n') + self.include_list = [ + '"base.hpp"', + '"scope_timer.hpp"', + '"fftw_interface.hpp"', + '"full_code/main_code.hpp"', + '"full_code/NSVEparticles.hpp"', + '<cmath>', + '<iostream>', + '<hdf5.h>', + '<string>', + '<cstring>', + '<fftw3-mpi.h>', + '<omp.h>', + '<cfenv>', + '<cstdlib>'] + self.main = """ + int main(int argc, char *argv[]) + {{ + bool fpe = ( + (getenv("BFPS_FPE_OFF") == nullptr) || + (getenv("BFPS_FPE_OFF") != std::string("TRUE"))); + return main_code< {0} >(argc, argv, fpe); + }} + """.format(self.dns_type + '<{0}>'.format(self.C_field_dtype)) + self.includes = '\n'.join( + ['#include ' + hh + for hh in self.include_list]) + self.definitions = open( + os.path.join( + cpp_location, self.dns_type + '.hpp'), 'r').read() + self.definitions += open( + os.path.join( + cpp_location, self.dns_type + '.cpp'), 'r').read() + with open(self.name + '.cpp', 'w') as outfile: + outfile.write(self.version_message + '\n\n') + outfile.write(self.includes + '\n\n') + outfile.write(self.definitions + '\n\n') + outfile.write(self.main + '\n') + self.check_current_vorticity_exists = False + self.check_current_velocity_exists = False + return None + def launch( + self, + args = [], + **kwargs): + self.simname = 'test_pset_init_functionality' + self.dns_type = 'test_particle_set_init' + self.C_field_dtype = 'double' + self.fluid_precision = 'double' + self.name = 'test_particle_set_init_' + self.fluid_precision + if not os.path.exists(self.get_data_file_name()): + self.write_par() + self.prepare_particle_file() + self.run(1, 1) + return None + def prepare_particle_file(self): + with h5py.File(self.simname + '_particles.h5', 'w') as ofile: + for species in ['tracers0', 'tracers1']: + ofile.create_group(species) + ofile[species].create_group('label') + ofile[species].create_group('state') + return None + + +def main(): + c = aTEST() + c.launch() + df = h5py.File(c.simname + '_particles.h5', 'r') + ll0 = 
df['tracers0/label/0'][()] + ll1 = df['tracers1/label/0'][()] + assert((ll0 == ll1).all()) + xx0 = df['tracers0/state/0'][()] + xx1 = df['tracers1/state/0'][()] + assert((xx0 == xx1).all()) + df.close() + print('SUCCESS, particle data was copied correctly') + return None + +if __name__ == '__main__': + main() + diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 7c665f6c398110b48d3cc725e4a39450c1698b4b..bbce88b035aeeb7043e5150762a6e35916e4a8f8 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -181,6 +181,9 @@ set(hpp_for_lib ${CMAKE_CURRENT_LIST_DIR}/particles/particles_system_builder.hpp ${CMAKE_CURRENT_LIST_DIR}/particles/particles_system.hpp ${CMAKE_CURRENT_LIST_DIR}/particles/particles_utils.hpp + ${CMAKE_CURRENT_LIST_DIR}/particles/particle_set_input.hpp + ${CMAKE_CURRENT_LIST_DIR}/particles/particle_set_input_random.hpp + ${CMAKE_CURRENT_LIST_DIR}/particles/particle_set_input_hdf5.hpp ${CMAKE_CURRENT_LIST_DIR}/full_code/main_code.hpp ${CMAKE_CURRENT_LIST_DIR}/full_code/codes_with_no_output.hpp ${CMAKE_CURRENT_LIST_DIR}/full_code/NSVE_no_output.hpp diff --git a/cpp/full_code/test_tracer_set.hpp b/cpp/full_code/test_tracer_set.hpp index b6d0ad4a0a68500639fea1f5b1e214a2fa61aad4..6144d3df456cd660171a5dc6a7d9caafd4a47f3a 100644 --- a/cpp/full_code/test_tracer_set.hpp +++ b/cpp/full_code/test_tracer_set.hpp @@ -29,12 +29,13 @@ -#include <cstdlib> #include "base.hpp" #include "kspace.hpp" #include "field.hpp" #include "full_code/test.hpp" +#include <cstdlib> + /** \brief A class for testing basic `particle_set` functionality. */ diff --git a/cpp/particles/abstract_particles_output.hpp b/cpp/particles/abstract_particles_output.hpp index af690dcbd72ca06cf756d1d639a6efaebfd6b677..75fbe3788ce1082c300f6f71d9a8546598d3e47d 100644 --- a/cpp/particles/abstract_particles_output.hpp +++ b/cpp/particles/abstract_particles_output.hpp @@ -349,6 +349,71 @@ public: virtual void write(const int idx_time_step, const position_type* positions, const std::unique_ptr<output_type[]>* rhs, const partsize_t nb_particles, const partsize_t particles_idx_offset, const int size_particle_rhs) = 0; + + void require_groups(std::string filename, std::string particle_species_name, std::string group_name){ + if (this->current_is_involved){ + if (this->my_rank == 0){ + bool file_exists = false; + { + struct stat file_buffer; + file_exists = (stat(filename.c_str(), &file_buffer) == 0); + } + hid_t file_id; + if (file_exists) + file_id = H5Fopen( + filename.c_str(), + H5F_ACC_RDWR | H5F_ACC_DEBUG, + H5P_DEFAULT); + else + file_id = H5Fcreate( + filename.c_str(), + H5F_ACC_EXCL | H5F_ACC_DEBUG, + H5P_DEFAULT, + H5P_DEFAULT); + assert(file_id >= 0); + bool group_exists = H5Lexists( + file_id, + particle_species_name.c_str(), + H5P_DEFAULT); + if (!group_exists) + { + hid_t gg = H5Gcreate( + file_id, + particle_species_name.c_str(), + H5P_DEFAULT, + H5P_DEFAULT, + H5P_DEFAULT); + assert(gg >= 0); + H5Gclose(gg); + } + hid_t gg = H5Gopen( + file_id, + particle_species_name.c_str(), + H5P_DEFAULT); + assert(gg >= 0); + group_exists = H5Lexists( + gg, + group_name.c_str(), + H5P_DEFAULT); + if (!group_exists) + { + hid_t ggg = H5Gcreate( + gg, + group_name.c_str(), + H5P_DEFAULT, + H5P_DEFAULT, + H5P_DEFAULT); + assert(ggg >= 0); + H5Gclose(ggg); + } + H5Gclose(gg); + H5Fclose(file_id); + + } + MPI_Barrier(this->mpi_com_writer); + } + } + }; #endif diff --git a/cpp/particles/interpolation/abstract_particle_set.hpp b/cpp/particles/interpolation/abstract_particle_set.hpp index 
af907584ddd55dedc7d8a6d5a6b59f6f01262928..b006d2a6fff17376f99776a982ea7634de9d3959 100644 --- a/cpp/particles/interpolation/abstract_particle_set.hpp +++ b/cpp/particles/interpolation/abstract_particle_set.hpp @@ -63,11 +63,23 @@ class abstract_particle_set // virtual destructor virtual ~abstract_particle_set() noexcept(false){} - // extract particle information + /** \brief Get pointer to particle state. + */ virtual particle_rnumber* getParticleState() const = 0; + /** \brief Get pointer to particle indices. + */ virtual partsize_t* getParticleIndices() const = 0; + /** \brief Get pointer to particle labels. + */ virtual partsize_t* getParticleLabels() const = 0; + /** \brief Copy particle labels from given pointer. + */ + virtual int setParticleLabels(partsize_t *) = 0; + /** \brief Copy particle state from given pointer. + */ virtual int setParticleState(particle_rnumber *) = 0; + /** \brief Copy particle state to given pointer. + */ virtual int getParticleState(particle_rnumber *) = 0; virtual std::unique_ptr<particle_rnumber[]> extractFromParticleState( @@ -208,7 +220,7 @@ class abstract_particle_set { TIMEZONE("abstract_particle_set::writeParticleLabels"); MPI_Barrier(MPI_COMM_WORLD); - particles_output_sampling_hdf5<partsize_t, particle_rnumber, partsize_t, 3> *particle_sample_writer = new particles_output_sampling_hdf5<partsize_t, double, partsize_t, 3>( + particles_output_sampling_hdf5<partsize_t, particle_rnumber, partsize_t, 3> *particle_sample_writer = new particles_output_sampling_hdf5<partsize_t, particle_rnumber, partsize_t, 3>( MPI_COMM_WORLD, this->getTotalNumberOfParticles(), file_name, @@ -242,6 +254,45 @@ class abstract_particle_set return EXIT_SUCCESS; } + template <int state_size> + int writeParticleStates( + const std::string file_name, + const std::string species_name, + const std::string field_name, + const int iteration) + { + TIMEZONE("abstract_particle_set::writeParticleStates"); + assert(state_size == this->getStateSize()); + MPI_Barrier(MPI_COMM_WORLD); + particles_output_sampling_hdf5<partsize_t, particle_rnumber, particle_rnumber, state_size> *particle_sample_writer = new particles_output_sampling_hdf5<partsize_t, particle_rnumber, particle_rnumber, state_size>( + MPI_COMM_WORLD, + this->getTotalNumberOfParticles(), + file_name, + species_name, + field_name + std::string("/") + std::to_string(iteration)); + // set file layout + particle_sample_writer->setParticleFileLayout(this->getParticleFileLayout()); + // allocate position array + std::unique_ptr<particle_rnumber[]> xx = this->extractFromParticleState(0, 3); + std::unique_ptr<particle_rnumber[]> ss = this->extractFromParticleState(0, state_size); + particle_sample_writer->template save_dataset<state_size>( + species_name, + field_name, + xx.get(), + &ss, + this->getParticleIndices(), + this->getLocalNumberOfParticles(), + iteration); + // deallocate sample writer + delete particle_sample_writer; + // deallocate position array + delete[] xx.release(); + // deallocate full state array + delete[] ss.release(); + MPI_Barrier(MPI_COMM_WORLD); + return EXIT_SUCCESS; + } + template <typename field_rnumber, field_backend be, field_components fc> diff --git a/cpp/particles/interpolation/particle_set.hpp b/cpp/particles/interpolation/particle_set.hpp index 5f5eb7aa1bf30ead417c0409fbb6d5a768d294f6..33f1ca447e72414c245b86a11560f5ce1df861b5 100644 --- a/cpp/particles/interpolation/particle_set.hpp +++ b/cpp/particles/interpolation/particle_set.hpp @@ -300,6 +300,13 @@ class particle_set: public abstract_particle_set 
return this->local_labels.get(); } + int setParticleLabels(partsize_t *src_local_labels) + { + for (partsize_t idx = 0; idx < this->getLocalNumberOfParticles(); idx++) + this->local_labels[idx] = src_local_labels[idx]; + return EXIT_SUCCESS; + } + partsize_t getLocalNumberOfParticles() const { return this->local_number_of_particles; diff --git a/cpp/particles/particle_set_input.hpp b/cpp/particles/particle_set_input.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b5bf38f808f8761f591d902189efc46953bb2e41 --- /dev/null +++ b/cpp/particles/particle_set_input.hpp @@ -0,0 +1,186 @@ +/****************************************************************************** +* * +* Copyright 2022 Max Planck Institute for Dynamics and Self-Organization * +* * +* This file is part of TurTLE. * +* * +* TurTLE is free software: you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published * +* by the Free Software Foundation, either version 3 of the License, * +* or (at your option) any later version. * +* * +* TurTLE is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. * +* * +* You should have received a copy of the GNU General Public License * +* along with TurTLE. If not, see <http://www.gnu.org/licenses/> * +* * +* Contact: Cristian.Lalescu@mpcdf.mpg.de * +* * +******************************************************************************/ + + + +#ifndef PARTICLE_SET_INPUT_HPP +#define PARTICLE_SET_INPUT_HPP + +#include <mpi.h> +#include <cassert> +#include "particles/abstract_particles_input.hpp" + +template <class partsize_t, class particle_rnumber, int state_size> +class particle_set_input: public abstract_particles_input<partsize_t, particle_rnumber> +{ + protected: + MPI_Comm comm; + int myrank, nprocs; + + hsize_t total_number_of_particles; + hsize_t number_rhs; + partsize_t local_number_of_particles; + + std::unique_ptr<particle_rnumber[]> local_particle_state; + std::unique_ptr<partsize_t[]> local_particle_index; + std::unique_ptr<partsize_t[]> local_particle_label; + std::vector<particle_rnumber> in_spatial_limit_per_proc; + public: + ~particle_set_input() noexcept(false){} + particle_set_input( + const MPI_Comm in_mpi_comm, + const particle_rnumber my_spatial_low_limit, + const particle_rnumber my_spatial_up_limit): + comm(in_mpi_comm) + { + TIMEZONE("particles_input_grid::particles_input_grid"); + assert(state_size >= 3); + static_assert((std::is_same<particle_rnumber, double>::value || + std::is_same<particle_rnumber, float>::value), + "real_number must be double or float"); + AssertMpi(MPI_Comm_rank(comm, &myrank)); + AssertMpi(MPI_Comm_size(comm, &nprocs)); + this->in_spatial_limit_per_proc = BuildLimitsAllProcesses<particle_rnumber>( + this->comm, + my_spatial_low_limit, + my_spatial_up_limit); + assert(int(in_spatial_limit_per_proc.size()) == nprocs+1); + } + int permute( + particles_utils::IntervalSplitter<hsize_t> load_splitter, + std::unique_ptr<particle_rnumber[]> &split_particle_state, + std::unique_ptr<partsize_t[]> &split_particle_index, + std::unique_ptr<partsize_t[]> &split_particle_label) + { + // Permute + std::vector<partsize_t> nb_particles_per_proc(this->nprocs); + { + TIMEZONE("particles_input_grid::partition"); + + const particle_rnumber spatial_box_offset = in_spatial_limit_per_proc[0]; + const particle_rnumber 
spatial_box_width = + in_spatial_limit_per_proc[this->nprocs] - in_spatial_limit_per_proc[0]; + + partsize_t previousOffset = 0; + for(int idx_proc = 0 ; idx_proc < this->nprocs-1 ; ++idx_proc){ + const particle_rnumber limitPartitionShifted = + in_spatial_limit_per_proc[idx_proc+1] - spatial_box_offset; + const partsize_t localOffset = particles_utils::partition_extra<partsize_t, state_size>( + &split_particle_state[previousOffset*state_size], + partsize_t(load_splitter.getMySize())-previousOffset, + [&](const particle_rnumber val[]){ + const particle_rnumber shiftPos = val[IDXC_Z] - spatial_box_offset; + const particle_rnumber nbRepeat = floor(shiftPos/spatial_box_width); + const particle_rnumber posInBox = shiftPos - (spatial_box_width*nbRepeat); + return posInBox < limitPartitionShifted; + }, + [&](const partsize_t idx1, const partsize_t idx2){ + std::swap(split_particle_index[idx1], + split_particle_index[idx2]); + std::swap(split_particle_label[idx1], + split_particle_label[idx2]); + }, + previousOffset); + + nb_particles_per_proc[idx_proc] = localOffset; + previousOffset += localOffset; + } + nb_particles_per_proc[this->nprocs-1] = partsize_t(load_splitter.getMySize()) - previousOffset; + } + + { + TIMEZONE("particles_input_grid::exchanger"); + alltoall_exchanger exchanger( + this->comm, + std::move(nb_particles_per_proc)); + // nb_particles_per_processes cannot be used after due to move + + this->local_number_of_particles = exchanger.getTotalToRecv(); + + if(this->local_number_of_particles){ + this->local_particle_state.reset(new particle_rnumber[exchanger.getTotalToRecv()*state_size]); + } + exchanger.alltoallv<particle_rnumber>( + split_particle_state.get(), + this->local_particle_state.get(), + state_size); + delete[] split_particle_state.release(); + + if(this->local_number_of_particles){ + this->local_particle_index.reset(new partsize_t[exchanger.getTotalToRecv()]); + } + exchanger.alltoallv<partsize_t>( + split_particle_index.get(), + this->local_particle_index.get()); + delete[] split_particle_index.release(); + + if(this->local_number_of_particles){ + this->local_particle_label.reset(new partsize_t[exchanger.getTotalToRecv()]); + } + exchanger.alltoallv<partsize_t>( + split_particle_label.get(), + this->local_particle_label.get()); + delete[] split_particle_label.release(); + } + return EXIT_SUCCESS; + } + + partsize_t getTotalNbParticles() + { + return this->total_number_of_particles; + } + partsize_t getLocalNbParticles() + { + return this->local_number_of_particles; + } + int getNbRhs() + { + return 0; + } + + std::unique_ptr<particle_rnumber[]> getMyParticles() + { + assert(this->local_particle_state != nullptr || this->local_number_of_particles == 0); + return std::move(this->local_particle_state); + } + + std::unique_ptr<partsize_t[]> getMyParticlesIndexes() + { + assert(this->local_particle_index != nullptr || this->local_number_of_particles == 0); + return std::move(this->local_particle_index); + } + + std::unique_ptr<partsize_t[]> getMyParticlesLabels() + { + assert(this->local_particle_label != nullptr || this->local_number_of_particles == 0); + return std::move(this->local_particle_label); + } + + std::vector<std::unique_ptr<particle_rnumber[]>> getMyRhs() + { + return std::move(std::vector<std::unique_ptr<particle_rnumber[]>>()); + } +}; + + +#endif//PARTICLE_SET_INPUT_HPP diff --git a/cpp/particles/particle_set_input_hdf5.hpp b/cpp/particles/particle_set_input_hdf5.hpp new file mode 100644 index 
0000000000000000000000000000000000000000..99c94a8ffccb317aff3ac3fb7d6141ceb918eca3 --- /dev/null +++ b/cpp/particles/particle_set_input_hdf5.hpp @@ -0,0 +1,213 @@ +/****************************************************************************** +* * +* Copyright 2022 Max Planck Computing and Data Facility * +* * +* This file is part of TurTLE. * +* * +* TurTLE is free software: you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published * +* by the Free Software Foundation, either version 3 of the License, * +* or (at your option) any later version. * +* * +* TurTLE is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. * +* * +* You should have received a copy of the GNU General Public License * +* along with TurTLE. If not, see <http://www.gnu.org/licenses/> * +* * +* Contact: Cristian.Lalescu@mpcdf.mpg.de * +* * +******************************************************************************/ + + + +#ifndef PARTICLE_SET_INPUT_HDF5_HPP +#define PARTICLE_SET_INPUT_HDF5_HPP + +#include <random> +#include "particles/particle_set_input.hpp" + +template <class partsize_t, class particle_rnumber, int state_size> +class particle_set_input_hdf5: public particle_set_input<partsize_t, particle_rnumber, state_size> +{ + private: + const std::string file_name; + const std::string species_name; + const int iteration; + + std::vector<hsize_t> file_layout; + + public: + particle_set_input_hdf5( + const MPI_Comm in_mpi_comm, + const std::string& inFileName, + const std::string& inSpeciesName, + const int inIteration, + const particle_rnumber my_spatial_low_limit, + const particle_rnumber my_spatial_up_limit) + : particle_set_input<partsize_t, particle_rnumber, state_size>( + in_mpi_comm, + my_spatial_low_limit, + my_spatial_up_limit) + , file_name(inFileName) + , species_name(inSpeciesName) + , iteration(inIteration) + { + TIMEZONE("particles_set_input_hdf5::particles_set_input_hdf5"); + hid_t dataset = H5E_ERROR; + hid_t particle_file = H5E_ERROR; + hid_t fspace = H5E_ERROR; + hid_t mspace = H5E_ERROR; + int ndims = 0; + + hsize_t *fdims = NULL; + hsize_t *mdims = NULL; + hsize_t *foffset = NULL; + + // open file + hid_t plist_id_par = H5Pcreate(H5P_FILE_ACCESS); + assert(plist_id_par >= 0); + { + int retTest = H5Pset_fapl_mpio(plist_id_par, this->comm, MPI_INFO_NULL); + variable_used_only_in_assert(retTest); + assert(retTest >= 0); + } + particle_file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, plist_id_par); + assert(particle_file >= 0); + assert(particle_file != H5E_ERROR); + + // open state dataset + dataset = H5Dopen( + particle_file, + (species_name + std::string("/state/") + std::to_string(iteration)).c_str(), + H5P_DEFAULT); + assert(dataset >= 0); + assert(dataset != H5E_ERROR); + // read in data space for state dataset + fspace = H5Dget_space(dataset); + assert(fspace >= 0); + assert(fspace != H5E_ERROR); + // get number of dimensions for state array + ndims = H5Sget_simple_extent_ndims(fspace); + fdims = new hsize_t[ndims]; + // read shape of state array + assert(ndims == H5Sget_simple_extent_dims(fspace, fdims, NULL)); + // confirm the state size is correct + assert(fdims[ndims-1] == state_size); + // compute total number of particles, store file layout + this->total_number_of_particles = 1; + this->file_layout.resize(ndims-1); + for (int i=0; i<ndims-1; ++i) + { + 
this->file_layout[i] = fdims[i]; + this->total_number_of_particles *= fdims[i]; + } + + // now we can tell each processor how many particles it should hold + particles_utils::IntervalSplitter<hsize_t> load_splitter( + this->total_number_of_particles, this->nprocs, this->myrank); + + // we can now use a simpler fspace + H5Sclose(fspace); + delete[] fdims; + fdims = new hsize_t[2]; + fdims[0] = this->total_number_of_particles; + fdims[1] = state_size; + fspace = H5Screate_simple(2, fdims, NULL); + + // allocate array for preliminary particle data + std::unique_ptr<particle_rnumber[]> split_particle_state; + if(load_splitter.getMySize()) + { + split_particle_state.reset(new particle_rnumber[load_splitter.getMySize()*state_size]); + } + + // allocate and populate array for preliminary particle indices + std::unique_ptr<partsize_t[]> split_particle_index; + if(load_splitter.getMySize()) { + split_particle_index.reset(new partsize_t[load_splitter.getMySize()]); + } + for(partsize_t idx = 0 ; idx < partsize_t(load_splitter.getMySize()) ; idx++){ + split_particle_index[idx] = idx + partsize_t(load_splitter.getMyOffset()); + } + + // allocate and populate array for preliminary particle labels + std::unique_ptr<partsize_t[]> split_particle_label; + if(load_splitter.getMySize()) { + split_particle_label.reset(new partsize_t[load_splitter.getMySize()]); + } + for(partsize_t idx = 0 ; idx < partsize_t(load_splitter.getMySize()) ; idx++){ + split_particle_label[idx] = idx + partsize_t(load_splitter.getMyOffset()); + } + + // in memory particle array is flattened, i.e. shaped as "mdims" + mdims = new hsize_t[2]; + mdims[0] = load_splitter.getMySize(); + mdims[1] = state_size; + mspace = H5Screate_simple(2, mdims, NULL); + + // file space offset + foffset = new hsize_t[2]; + foffset[0] = load_splitter.getMyOffset(); + foffset[1] = 0; + + // select local data from file space + H5Sselect_hyperslab(fspace, H5S_SELECT_SET, foffset, NULL, mdims, NULL); + + // read state data + H5Dread( + dataset, + hdf5_tools::hdf5_type_id<particle_rnumber>(), + mspace, + fspace, + H5P_DEFAULT, + split_particle_state.get()); + H5Sclose(mspace); + H5Sclose(fspace); + H5Dclose(dataset); + + // open label dataset + dataset = H5Dopen( + particle_file, + (species_name + std::string("/label/") + std::to_string(iteration)).c_str(), + H5P_DEFAULT); + if (dataset >= 0 && dataset != H5E_ERROR) { + fspace = H5Screate_simple(1, fdims, NULL); + mspace = H5Screate_simple(1, mdims, NULL); + H5Sselect_hyperslab(fspace, H5S_SELECT_SET, foffset, NULL, mdims, NULL); + H5Dread( + dataset, + hdf5_tools::hdf5_type_id<partsize_t>(), + mspace, + fspace, + H5P_DEFAULT, + split_particle_label.get()); + H5Sclose(mspace); + H5Sclose(fspace); + H5Dclose(dataset); + } else { + DEBUG_MSG("Could not read labels for species %s and iteration %d; H5Dopen returned %ld\n", + species_name.c_str(), iteration, dataset); + } + delete[] mdims; + delete[] fdims; + H5Fclose(particle_file); + + this->permute( + load_splitter, + split_particle_state, + split_particle_index, + split_particle_label); + + } + + std::vector<hsize_t> getParticleFileLayout() + { + return this->file_layout; + } +}; + + +#endif//PARTICLE_SET_INPUT_HDF5_HPP diff --git a/cpp/particles/particle_set_input_random.hpp b/cpp/particles/particle_set_input_random.hpp new file mode 100644 index 0000000000000000000000000000000000000000..78e34bf7ffdea44030792b8f5c7608d9183566d0 --- /dev/null +++ b/cpp/particles/particle_set_input_random.hpp @@ -0,0 +1,152 @@ 
+/****************************************************************************** +* * +* Copyright 2020 Max Planck Institute for Dynamics and Self-Organization * +* * +* This file is part of TurTLE. * +* * +* TurTLE is free software: you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published * +* by the Free Software Foundation, either version 3 of the License, * +* or (at your option) any later version. * +* * +* TurTLE is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. * +* * +* You should have received a copy of the GNU General Public License * +* along with TurTLE. If not, see <http://www.gnu.org/licenses/> * +* * +* Contact: Cristian.Lalescu@ds.mpg.de * +* * +******************************************************************************/ + + + +#ifndef PARTICLE_SET_INPUT_RANDOM_HPP +#define PARTICLE_SET_INPUT_RANDOM_HPP + +#include <random> +#include "particles/particle_set_input.hpp" + +template <class partsize_t, class particle_rnumber, int state_size> +class particle_set_input_random: public particle_set_input<partsize_t, particle_rnumber, state_size> +{ + public: + particle_set_input_random( + const MPI_Comm in_mpi_comm, + const partsize_t NPARTICLES, + const int rseed, + const particle_rnumber my_spatial_low_limit, + const particle_rnumber my_spatial_up_limit): + particle_set_input<partsize_t, particle_rnumber, state_size>(in_mpi_comm, my_spatial_low_limit, my_spatial_up_limit) + { + TIMEZONE("particles_set_input_random::particles_set_input_random"); + // initialize a separate random number generator for each MPI process + std::mt19937_64 rgen; + const double twopi = 4*acos(0); + // positions are uniformly distributed within 2pi cube + // TODO: provide possibilities for different rectangle sizes + std::uniform_real_distribution<particle_rnumber> udist(0.0, twopi); + + // other state components are normally distributed (presumably they're velocities) + std::normal_distribution<particle_rnumber> gdist; + + // seed random number generators such that no seed is ever repeated if we change the value of rseed. + // basically use a multi-dimensional array indexing technique to come up with actual seed. + // Note: this method is not invariant to the number of MPI processes! + int current_seed = ( + rseed * (this->nprocs) + + this->myrank); + rgen.seed(current_seed); + + this->total_number_of_particles = NPARTICLES; + + // we know the total number of particles, but we want to generate particle locations in parallel. 
+ // so we need a preliminary distributor of particles, location-agnostic: + particles_utils::IntervalSplitter<hsize_t> load_splitter( + this->total_number_of_particles, this->nprocs, this->myrank); + + // allocate array for preliminary particle data + std::unique_ptr<particle_rnumber[]> split_particle_state; + if(load_splitter.getMySize()) + { + split_particle_state.reset(new particle_rnumber[load_splitter.getMySize()*state_size]); + } + + // allocate and populate array for preliminary particle indices + std::unique_ptr<partsize_t[]> split_particle_index; + if(load_splitter.getMySize()) { + split_particle_index.reset(new partsize_t[load_splitter.getMySize()]); + } + for(partsize_t idx = 0 ; idx < partsize_t(load_splitter.getMySize()) ; idx++){ + split_particle_index[idx] = idx + partsize_t(load_splitter.getMyOffset()); + } + + // allocate and populate array for preliminary particle labels + std::unique_ptr<partsize_t[]> split_particle_label; + if(load_splitter.getMySize()) { + split_particle_label.reset(new partsize_t[load_splitter.getMySize()]); + } + for(partsize_t idx = 0 ; idx < partsize_t(load_splitter.getMySize()) ; idx++){ + split_particle_label[idx] = idx + partsize_t(load_splitter.getMyOffset()); + } + + // generate random particle states + { + TIMEZONE("particles_input_random::generate random numbers"); + if (state_size == 15) + // deformation tensor + { + for (partsize_t idx=0; idx < partsize_t(load_splitter.getMySize()); idx++) + { + // positions are uniformly distributed within cube + for (int cc=0; cc < 3; cc++) + split_particle_state[idx*state_size + cc] = udist(rgen); + // Q matrix is initially identity + split_particle_state[idx*state_size + 3] = particle_rnumber(1.0); + split_particle_state[idx*state_size + 4] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 5] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 6] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 7] = particle_rnumber(1.0); + split_particle_state[idx*state_size + 8] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 9] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 10] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 11] = particle_rnumber(1.0); + // log-stretching factors are initially 0 + split_particle_state[idx*state_size + 12] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 13] = particle_rnumber(0.0); + split_particle_state[idx*state_size + 14] = particle_rnumber(0.0); + } + } + else + { + for (partsize_t idx=0; idx < partsize_t(load_splitter.getMySize()); idx++) + { + // positions are uniformly distributed within cube + for (int cc=0; cc < 3; cc++) + split_particle_state[idx*state_size + cc] = udist(rgen); + // velocities/orientations are normally distributed + // TODO: add option for normalization of orientation vectors etc + for (int cc = 3; cc < state_size; cc++) + split_particle_state[idx*state_size + cc] = gdist(rgen); + } + } + } + this->permute( + load_splitter, + split_particle_state, + split_particle_index, + split_particle_label); + + } + + std::vector<hsize_t> getParticleFileLayout() + { + return std::vector<hsize_t>({ + hsize_t(this->getTotalNbParticles())}); + } +}; + + +#endif//PARTICLE_SET_INPUT_RANDOM_HPP diff --git a/cpp/particles/particle_solver.cpp b/cpp/particles/particle_solver.cpp index e22290e0a815f9e072b5fdc4a524aa31ef089d9c..10055011d43ad581846a6d9edc5cb288424b809d 100644 --- a/cpp/particles/particle_solver.cpp +++ b/cpp/particles/particle_solver.cpp @@ -309,7 
+309,7 @@ template <int state_size> int particle_solver::writeCheckpoint( particles_output_hdf5<partsize_t, particle_rnumber, state_size> *particles_output_writer) { - TIMEZONE("particle_solver::writeCheckpoint"); + TIMEZONE("particle_solver::writeCheckpoint to particles_output_hdf5"); particles_output_writer->update_particle_species_name(this->particle_species_name); particles_output_writer->setParticleFileLayout(pset->getParticleFileLayout()); particles_output_writer->template save<state_size>( @@ -321,9 +321,50 @@ int particle_solver::writeCheckpoint( return EXIT_SUCCESS; } + +template <int state_size> +int particle_solver::writeCheckpoint( + const std::string file_name) +{ + TIMEZONE("particle_solver::writeCheckpoint to file_name"); + this->pset->writeParticleStates<state_size>( + file_name, + this->particle_species_name, + "state", + this->iteration); + return EXIT_SUCCESS; +} + + +template <int state_size> +int particle_solver::writeCheckpointWithLabels( + const std::string file_name) +{ + TIMEZONE("particle_solver::writeCheckpointWithLabels"); + this->template writeCheckpoint<state_size>(file_name); + this->pset->writeParticleLabels( + file_name, + this->particle_species_name, + "label", + this->iteration); + return EXIT_SUCCESS; +} + template int particle_solver::writeCheckpoint<3>(particles_output_hdf5<partsize_t, particle_rnumber, 3> *); template int particle_solver::writeCheckpoint<6>(particles_output_hdf5<partsize_t, particle_rnumber, 6> *); template int particle_solver::writeCheckpoint<7>(particles_output_hdf5<partsize_t, particle_rnumber, 7> *); template int particle_solver::writeCheckpoint<8>(particles_output_hdf5<partsize_t, particle_rnumber, 8> *); template int particle_solver::writeCheckpoint<15>(particles_output_hdf5<partsize_t, particle_rnumber, 15> *); +template int particle_solver::writeCheckpoint<3> (const std::string file_name); +template int particle_solver::writeCheckpoint<6> (const std::string file_name); +template int particle_solver::writeCheckpoint<7> (const std::string file_name); +template int particle_solver::writeCheckpoint<8> (const std::string file_name); +template int particle_solver::writeCheckpoint<15>(const std::string file_name); + +template int particle_solver::writeCheckpointWithLabels<3> (const std::string file_name); +template int particle_solver::writeCheckpointWithLabels<6> (const std::string file_name); +template int particle_solver::writeCheckpointWithLabels<7> (const std::string file_name); +template int particle_solver::writeCheckpointWithLabels<8> (const std::string file_name); +template int particle_solver::writeCheckpointWithLabels<15>(const std::string file_name); + diff --git a/cpp/particles/particle_solver.hpp b/cpp/particles/particle_solver.hpp index 54bbc3c846022c00f220d9f427e121241fd10223..df78618ed9569a2eb1cb04e0d0f8dba540ca93ac 100644 --- a/cpp/particles/particle_solver.hpp +++ b/cpp/particles/particle_solver.hpp @@ -102,6 +102,14 @@ class particle_solver int writeCheckpoint( particles_output_hdf5<partsize_t, particle_rnumber, state_size> *particles_output_writer); + template <int state_size> + int writeCheckpoint( + const std::string file_name); + + template <int state_size> + int writeCheckpointWithLabels( + const std::string file_name); + int Euler( double dt, abstract_particle_rhs &rhs); diff --git a/cpp/particles/particles_output_hdf5.hpp b/cpp/particles/particles_output_hdf5.hpp index 0ef809e63114948509c96f5cbccc5cdd615b3599..72c8c2341e6be965469bb313804a1c2908d21d4f 100644 --- a/cpp/particles/particles_output_hdf5.hpp +++ 
b/cpp/particles/particles_output_hdf5.hpp @@ -140,83 +140,18 @@ public: return EXIT_SUCCESS; } + // TODO move to parent class, with input filename, particle_species_name and group_name, + // here call the parent method + // parent method will also need to be called in particle_sampling_output_hdf5 void require_checkpoint_groups(std::string filename){ - if(Parent::isInvolved()){ - if (Parent::getMyRank() == 0) - { - bool file_exists = false; - { - struct stat file_buffer; - file_exists = (stat(filename.c_str(), &file_buffer) == 0); - } - hid_t file_id; - if (file_exists) - file_id = H5Fopen( - filename.c_str(), - H5F_ACC_RDWR | H5F_ACC_DEBUG, - H5P_DEFAULT); - else - file_id = H5Fcreate( - filename.c_str(), - H5F_ACC_EXCL | H5F_ACC_DEBUG, - H5P_DEFAULT, - H5P_DEFAULT); - assert(file_id >= 0); - bool group_exists = H5Lexists( - file_id, - this->particle_species_name.c_str(), - H5P_DEFAULT); - if (!group_exists) - { - hid_t gg = H5Gcreate( - file_id, - this->particle_species_name.c_str(), - H5P_DEFAULT, - H5P_DEFAULT, - H5P_DEFAULT); - assert(gg >= 0); - H5Gclose(gg); - } - hid_t gg = H5Gopen( - file_id, - this->particle_species_name.c_str(), - H5P_DEFAULT); - assert(gg >= 0); - group_exists = H5Lexists( - gg, - "state", - H5P_DEFAULT); - if (!group_exists) - { - hid_t ggg = H5Gcreate( - gg, - "state", - H5P_DEFAULT, - H5P_DEFAULT, - H5P_DEFAULT); - assert(ggg >= 0); - H5Gclose(ggg); - } - group_exists = H5Lexists( - gg, - "rhs", - H5P_DEFAULT); - if (!group_exists) - { - hid_t ggg = H5Gcreate( - gg, - "rhs", - H5P_DEFAULT, - H5P_DEFAULT, - H5P_DEFAULT); - assert(ggg >= 0); - H5Gclose(ggg); - } - H5Gclose(gg); - H5Fclose(file_id); - } - MPI_Barrier(Parent::getComWriter()); - } + Parent::require_groups( + filename, + this->particle_species_name, + "state"); + Parent::require_groups( + filename, + this->particle_species_name, + "rhs"); } void write( diff --git a/setup.py.in b/setup.py.in index 984a8993a1aedbbcae15267b7dcd24fb87012fe8..ae177154567b81ec87fdb208982cfeed772a3afa 100644 --- a/setup.py.in +++ b/setup.py.in @@ -51,6 +51,7 @@ setup( 'turtle.test_fftw = TurTLE.test.test_fftw:main', 'turtle.test_Heun_p2p = TurTLE.test.test_Heun_p2p:main', 'turtle.test_particle_deleter = TurTLE.test.test_particle_deleter:main', + 'turtle.test_particle_set_init = TurTLE.test.test_particle_set_init:main', 'turtle.test_collisions = TurTLE.test.test_collisions:main'], }, version = VERSION,
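
The require_groups() method added to abstract_particles_output creates the "<species>/<group>" hierarchy only when it is missing, on rank 0 of the writer communicator, followed by an MPI barrier; particles_output_hdf5::require_checkpoint_groups now simply calls it twice, for "state" and "rhs". The same open-or-create pattern, condensed into a serial helper, looks roughly as follows (require_group and the demo file name are illustrative, not part of the patch):

#include <hdf5.h>
#include <cassert>
#include <cstdlib>

// hypothetical helper mirroring abstract_particles_output::require_groups:
// create the child group only if it does not exist yet, then return an open handle
hid_t require_group(hid_t loc, const char *name)
{
    if (H5Lexists(loc, name, H5P_DEFAULT) <= 0)
    {
        hid_t gg = H5Gcreate(loc, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        assert(gg >= 0);
        H5Gclose(gg);
    }
    return H5Gopen(loc, name, H5P_DEFAULT);
}

int main()
{
    // serial sketch; in the patch only rank 0 touches the file and the other
    // ranks wait on MPI_Barrier(this->mpi_com_writer)
    hid_t file_id = H5Fcreate(
            "require_groups_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t species = require_group(file_id, "tracers0");
    hid_t label   = require_group(species, "label"); // nested groups are checked level
    hid_t state   = require_group(species, "state"); // by level, as in the patch
    H5Gclose(label);
    H5Gclose(state);
    H5Gclose(species);
    H5Fclose(file_id);
    return EXIT_SUCCESS;
}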
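
particle_set_input::permute() redistributes the locally generated (or locally read) particles to the MPI process that owns their z-slab: the z coordinate is wrapped back into the periodic box, compared against the per-process limits from BuildLimitsAllProcesses, and the corresponding index and label entries are swapped alongside the state by particles_utils::partition_extra before the alltoall exchange. A self-contained sketch of the same ownership rule for two slabs (the particle struct, z_limits and belongs_to_rank0 are illustrative names, not TurTLE API):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// illustrative stand-in for one row of the flattened state/label arrays
struct particle { double x, y, z; long long label; };

int main()
{
    const double twopi = 4 * std::acos(0.0);
    // two "ranks", each owning half of the periodic z range [0, 2pi)
    const double z_limits[3] = {0.0, twopi / 2, twopi};
    std::vector<particle> p = {
        {0.1, 0.2, 0.3, 0}, {1.0, 1.0, 5.0, 1}, {2.0, 2.0, 3.5, 2}, {0.5, 0.5, -0.1, 3}};

    const double box_offset  = z_limits[0];
    const double box_width   = z_limits[2] - z_limits[0];
    const double first_plane = z_limits[1] - box_offset;

    // same rule as the partition lambda: wrap z into the box, then compare
    auto belongs_to_rank0 = [&](const particle &pp) {
        const double shifted = pp.z - box_offset;
        const double wrapped = shifted - box_width * std::floor(shifted / box_width);
        return wrapped < first_plane;
    };
    auto mid = std::stable_partition(p.begin(), p.end(), belongs_to_rank0);

    std::printf("rank 0 receives %ld particles, rank 1 receives %ld\n",
                long(mid - p.begin()), long(p.end() - mid));
    return 0;
}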
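
particle_set_input_hdf5 opens the file through the MPI-IO driver and lets every rank read only its own hyperslab of "<species>/state/<iteration>", with particles_utils::IntervalSplitter deciding the per-rank offset and count. A condensed sketch of that read pattern with plain HDF5 and MPI (the file name, the dataset path and the naive contiguous split are placeholders; labels, error handling and the subsequent permute step are omitted):

#include <hdf5.h>
#include <mpi.h>
#include <algorithm>
#include <cassert>
#include <vector>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int myrank, nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    // open the particle file with the MPI-IO file access property, as the constructor does
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    hid_t file = H5Fopen("test_particles.h5", H5F_ACC_RDONLY, fapl); // placeholder name
    hid_t dset = H5Dopen(file, "tracers0/state/0", H5P_DEFAULT);     // <species>/state/<iteration>

    // the state dataset is shaped (total_number_of_particles, state_size)
    hid_t fspace = H5Dget_space(dset);
    assert(H5Sget_simple_extent_ndims(fspace) == 2);
    hsize_t dims[2];
    H5Sget_simple_extent_dims(fspace, dims, NULL);

    // naive contiguous split, standing in for IntervalSplitter
    const hsize_t chunk  = dims[0] / nprocs + (dims[0] % nprocs != 0);
    const hsize_t offset = std::min(hsize_t(myrank) * chunk, dims[0]);
    const hsize_t count  = std::min(chunk, dims[0] - offset);

    std::vector<double> state(count * dims[1]);
    if (count > 0) {
        hsize_t start[2] = {offset, 0};
        hsize_t mdims[2] = {count, dims[1]};
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, mdims, NULL);
        hid_t mspace = H5Screate_simple(2, mdims, NULL);
        // the patch uses hdf5_tools::hdf5_type_id<particle_rnumber>() as the memory type
        H5Dread(dset, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, state.data());
        H5Sclose(mspace);
    }

    H5Sclose(fspace);
    H5Dclose(dset);
    H5Fclose(file);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}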
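
particle_set_input_random seeds one std::mt19937_64 per MPI process with rseed * nprocs + myrank, i.e. the (rseed, rank) pair is flattened like a two-dimensional array index, so changing rseed never reuses a seed as long as the communicator size stays fixed; the trade-off, noted in the source, is that the streams do depend on the number of processes. A small serial check of that property (nprocs and the loop bounds are arbitrary illustration values):

#include <cassert>
#include <cmath>
#include <random>
#include <set>

int main()
{
    const int nprocs = 4; // illustration value
    std::set<int> seen;
    for (int rseed = 0; rseed < 3; rseed++)
        for (int myrank = 0; myrank < nprocs; myrank++)
        {
            // same construction as in particle_set_input_random
            const int current_seed = rseed * nprocs + myrank;
            assert(seen.count(current_seed) == 0); // never repeated across (rseed, rank)
            seen.insert(current_seed);
            std::mt19937_64 rgen(current_seed);
            // positions ~ U(0, 2pi), remaining state components ~ N(0, 1), as in the patch
            std::uniform_real_distribution<double> udist(0.0, 4 * std::acos(0.0));
            std::normal_distribution<double> gdist;
            (void)udist(rgen);
            (void)gdist(rgen);
        }
    return 0;
}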
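
The new particle_solver::writeCheckpoint<state_size>(file_name) and writeCheckpointWithLabels<state_size>(file_name) overloads route the output through the particle set's own writeParticleStates / writeParticleLabels instead of a particles_output_hdf5 writer, producing "<species>/state/<iteration>" and "<species>/label/<iteration>" datasets of the kind particle_set_input_hdf5 reads back. A minimal usage sketch, assuming ps is an already configured particle_solver for 3-component tracers (the free function name is illustrative):

#include <string>
#include "particles/particle_solver.hpp"

// sketch only: "ps" is assumed to be a fully initialized particle_solver
// whose particle set carries 3 state components (plain tracers)
int save_labelled_tracers(
        particle_solver &ps,
        const std::string &checkpoint_file)
{
    // writes <species>/state/<iteration> and then <species>/label/<iteration>,
    // so the file can be fed back through particle_set_input_hdf5
    return ps.writeCheckpointWithLabels<3>(checkpoint_file);
}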