TurTLE, commit 477fccc1
Authored May 25, 2017 by Chichi Lalescu
Merge branch 'feature/field-binary-io' into develop
Parents: a66939b6, 9b51e23e
Pipeline #12983 passed in 6 minutes and 2 seconds
Changes: 24 files
bfps/PP.py (new file, mode 100644)
This diff is collapsed.
bfps/__main__.py

...
@@ -29,6 +29,7 @@ import argparse
 import bfps
 from .DNS import DNS
+from .PP import PP
 from .NavierStokes import NavierStokes
 from .NSVorticityEquation import NSVorticityEquation
 from .FluidResize import FluidResize
...
@@ -65,7 +66,7 @@ def main():
             'NSManyParticles-double']
     parser.add_argument(
             'base_class',
-            choices = ['DNS'] +
+            choices = ['DNS', 'PP'] +
                       NSoptions +
                       NSVEoptions +
                       FRoptions +
...
@@ -81,6 +82,10 @@ def main():
         c = DNS()
         c.launch(args = sys.argv[2:])
         return None
+    if opt.base_class == 'PP':
+        c = PP()
+        c.launch(args = sys.argv[2:])
+        return None
     if 'double' in opt.base_class:
         precision = 'double'
     else:
...
bfps/_base.py

...
@@ -123,7 +123,7 @@ class _base(object):
                     template_par = 'int'
                 elif parameters[key[i]].dtype == np.float64:
                     template_par = 'double'
-                src_txt += '{0} = read_vector<{1}>(parameter_file, "/{2}/{0}");\n'.format(
+                src_txt += '{0} = hdf5_tools::read_vector<{1}>(parameter_file, "/{2}/{0}");\n'.format(
                         key_prefix + key[i], template_par, file_group)
             else:
                 src_txt += 'H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &{0});\n'.format(key_prefix + key[i])
...
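
(Sketch, not part of the commit: the template changed above emits C++ parameter-reading code, now routed through hdf5_tools. Assuming a hypothetical float64 vector parameter "spectrum_ks" stored under the HDF5 group "/parameters", the generated line would look roughly like the following; the parameter name and the local declaration are illustrative only.)

    // hypothetical output of the updated Python template; assumes an open
    // HDF5 file handle `parameter_file` and a float64 vector parameter
    std::vector<double> spectrum_ks;
    spectrum_ks = hdf5_tools::read_vector<double>(parameter_file, "/parameters/spectrum_ks");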
bfps/_code.py

...
@@ -249,6 +249,8 @@ class _code(_base):
         assert(self.compile_code() == 0)
         if self.work_dir != os.path.realpath(os.getcwd()):
             shutil.copy(self.name, self.work_dir)
+        if 'niter_todo' not in self.parameters.keys():
+            self.parameters['niter_todo'] = 1
         current_dir = os.getcwd()
         os.chdir(self.work_dir)
         os.chdir(current_dir)
...
bfps/cpp/base.hpp

...
@@ -29,7 +29,7 @@
 #include <stdarg.h>
 #include <iostream>
 #include <typeinfo>
-#include "io_tools.hpp"
+#include "hdf5_tools.hpp"
 #ifndef BASE
...
bfps/cpp/fftw_interface.hpp

...
@@ -167,4 +167,7 @@ public:
     }
 };
 #endif // FFTW_INTERFACE_HPP
bfps/cpp/field.hpp

...
@@ -91,6 +91,11 @@ class field
                 const std::string field_name,
                 const int iteration);
+        int io_binary(
+                const std::string fname,
+                const int iteration,
+                const bool read = true);
         /* essential FFT stuff */
         void dft();
         void ift();
...
bfps/cpp/field_binary_IO.cpp (new file, mode 100644)
/**********************************************************************
* *
* Copyright 2015 Max Planck Institute *
* for Dynamics and Self-Organization *
* *
* This file is part of bfps. *
* *
* bfps is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* bfps is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with bfps. If not, see <http://www.gnu.org/licenses/> *
* *
* Contact: Cristian.Lalescu@ds.mpg.de *
* *
**********************************************************************/
#include <vector>
#include <array>
#include "base.hpp"
#include "scope_timer.hpp"
#include "field_binary_IO.hpp"

template <typename rnumber, field_representation fr, field_components fc>
field_binary_IO<rnumber, fr, fc>::field_binary_IO(
        const hsize_t *SIZES,
        const hsize_t *SUBSIZES,
        const hsize_t *STARTS,
        const MPI_Comm COMM_TO_USE):
    field_layout<fc>(SIZES, SUBSIZES, STARTS, COMM_TO_USE)
{
    TIMEZONE("field_binary_IO::field_binary_IO");
    std::vector<int> tsizes;
    std::vector<int> tsubsizes;
    std::vector<int> tstarts;
    tsizes.resize(ndim(fc));
    tsubsizes.resize(ndim(fc));
    tstarts.resize(ndim(fc));
    for (int i = 0; i < ndim(fc); i++)
    {
        tsizes[i] = int(this->sizes[i]);
        tsubsizes[i] = int(this->subsizes[i]);
        tstarts[i] = int(this->starts[i]);
    }
    // these are required if using unsigned char in the subarray creation
    //tsizes[ndim(fc)-1] *= sizeof(element_type);
    //tsubsizes[ndim(fc)-1] *= sizeof(element_type);
    //tstarts[ndim(fc)-1] *= sizeof(element_type);
    MPI_Type_create_subarray(
            ndim(fc),
            &tsizes.front(),
            &tsubsizes.front(),
            &tstarts.front(),
            MPI_ORDER_C,
            //MPI_UNSIGNED_CHAR, // in case element type fails
            mpi_type<rnumber>(fr),
            &this->mpi_array_dtype);
    MPI_Type_commit(&this->mpi_array_dtype);
    // check if there are processes without any data
    int local_zero_array[this->nprocs], zero_array[this->nprocs];
    for (int i = 0; i < this->nprocs; i++)
        local_zero_array[i] = 0;
    local_zero_array[this->myrank] = (this->subsizes[0] == 0) ? 1 : 0;
    MPI_Allreduce(
            local_zero_array,
            zero_array,
            this->nprocs,
            MPI_INT,
            MPI_SUM,
            this->comm);
    int no_of_excluded_ranks = 0;
    for (int i = 0; i < this->nprocs; i++)
        no_of_excluded_ranks += zero_array[i];
    DEBUG_MSG_WAIT(
            this->comm,
            "subsizes[0] = %d %d\n",
            this->subsizes[0],
            tsubsizes[0]);
    if (no_of_excluded_ranks == 0)
    {
        this->io_comm = this->comm;
        this->io_comm_nprocs = this->nprocs;
        this->io_comm_myrank = this->myrank;
    }
    else
    {
        int excluded_rank[no_of_excluded_ranks];
        for (int i = 0, j = 0; i < this->nprocs; i++)
            if (zero_array[i])
            {
                excluded_rank[j] = i;
                j++;
            }
        MPI_Group tgroup0, tgroup;
        MPI_Comm_group(this->comm, &tgroup0);
        MPI_Group_excl(tgroup0, no_of_excluded_ranks, excluded_rank, &tgroup);
        MPI_Comm_create(this->comm, tgroup, &this->io_comm);
        MPI_Group_free(&tgroup0);
        MPI_Group_free(&tgroup);
        if (this->subsizes[0] > 0)
        {
            MPI_Comm_rank(this->io_comm, &this->io_comm_myrank);
            MPI_Comm_size(this->io_comm, &this->io_comm_nprocs);
        }
        else
        {
            this->io_comm_myrank = MPI_PROC_NULL;
            this->io_comm_nprocs = -1;
        }
    }
}

template <typename rnumber, field_representation fr, field_components fc>
field_binary_IO<rnumber, fr, fc>::~field_binary_IO()
{
    TIMEZONE("field_binary_IO::~field_binary_IO");
    MPI_Type_free(&this->mpi_array_dtype);
    if (this->nprocs != this->io_comm_nprocs &&
        this->io_comm_myrank != MPI_PROC_NULL)
    {
        MPI_Comm_free(&this->io_comm);
    }
}

template <typename rnumber, field_representation fr, field_components fc>
int field_binary_IO<rnumber, fr, fc>::read(
        const std::string fname,
        void *buffer)
{
    TIMEZONE("field_binary_IO::read");
    char representation[] = "native";
    if (this->subsizes[0] > 0)
    {
        MPI_Info info;
        MPI_Info_create(&info);
        MPI_File f;
        char ffname[512];
        sprintf(ffname, "%s", fname.c_str());
        MPI_File_open(
                this->io_comm,
                ffname,
                MPI_MODE_RDONLY,
                info,
                &f);
        MPI_File_set_view(
                f,
                0,
                mpi_type<rnumber>(fr),
                this->mpi_array_dtype,
                representation,
                info);
        MPI_File_read_all(
                f,
                buffer,
                this->local_size,
                mpi_type<rnumber>(fr),
                MPI_STATUS_IGNORE);
        MPI_File_close(&f);
    }
    return EXIT_SUCCESS;
}

template <typename rnumber, field_representation fr, field_components fc>
int field_binary_IO<rnumber, fr, fc>::write(
        const std::string fname,
        void *buffer)
{
    TIMEZONE("field_binary_IO::write");
    char representation[] = "native";
    if (this->subsizes[0] > 0)
    {
        MPI_Info info;
        MPI_Info_create(&info);
        MPI_File f;
        char ffname[512];
        sprintf(ffname, "%s", fname.c_str());
        MPI_File_open(
                this->io_comm,
                ffname,
                MPI_MODE_CREATE | MPI_MODE_WRONLY,
                info,
                &f);
        MPI_File_set_view(
                f,
                0,
                mpi_type<rnumber>(fr),
                this->mpi_array_dtype,
                representation,
                info);
        MPI_File_write_all(
                f,
                buffer,
                this->local_size,
                mpi_type<rnumber>(fr),
                MPI_STATUS_IGNORE);
        MPI_File_close(&f);
    }
    return EXIT_SUCCESS;
}

template class field_binary_IO<float, REAL, ONE>;
template class field_binary_IO<float, COMPLEX, ONE>;
template class field_binary_IO<double, REAL, ONE>;
template class field_binary_IO<double, COMPLEX, ONE>;
template class field_binary_IO<float, REAL, THREE>;
template class field_binary_IO<float, COMPLEX, THREE>;
template class field_binary_IO<double, REAL, THREE>;
template class field_binary_IO<double, COMPLEX, THREE>;
template class field_binary_IO<float, REAL, THREExTHREE>;
template class field_binary_IO<float, COMPLEX, THREExTHREE>;
template class field_binary_IO<double, REAL, THREExTHREE>;
template class field_binary_IO<double, COMPLEX, THREExTHREE>;
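
(Usage sketch, not from the commit: how the class above might be driven directly. The global and local extents below are invented for illustration; in the commit they come from a field's clayout, as in NSVE_field_stats. The file name, the assumption that nprocs divides the grid size, and the convention that the last axis holds the components are all assumptions.)

    // sketch: round-trip a 3-component real field through the MPI-IO path
    #include <vector>
    #include <mpi.h>
    #include "field_binary_IO.hpp"

    int main(int argc, char *argv[])
    {
        MPI_Init(&argc, &argv);
        int rank, nprocs;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
        {
            // 32^3 grid, 3 components, slab-split along the slowest axis;
            // assumes nprocs divides 32
            hsize_t sizes[4]    = {32, 32, 32, 3};
            hsize_t subsizes[4] = {32/hsize_t(nprocs), 32, 32, 3};
            hsize_t starts[4]   = {hsize_t(rank)*(32/hsize_t(nprocs)), 0, 0, 0};
            field_binary_IO<float, REAL, THREE> bio(
                    sizes, subsizes, starts, MPI_COMM_WORLD);
            // one float per grid point and component on the local slab
            std::vector<float> buffer(
                    subsizes[0]*subsizes[1]*subsizes[2]*subsizes[3], 1.0f);
            bio.write("example_cvorticity_i00000", &buffer.front());
            bio.read("example_cvorticity_i00000", &buffer.front());
        } // bio's destructor must run before MPI_Finalize
        MPI_Finalize();
        return 0;
    }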
bfps/cpp/field_binary_IO.hpp (new file, mode 100644)
/**********************************************************************
* *
* Copyright 2015 Max Planck Institute *
* for Dynamics and Self-Organization *
* *
* This file is part of bfps. *
* *
* bfps is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* bfps is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with bfps. If not, see <http://www.gnu.org/licenses/> *
* *
* Contact: Cristian.Lalescu@ds.mpg.de *
* *
**********************************************************************/
#include <vector>
#include <string>
#include "base.hpp"
#include "fftw_interface.hpp"
#include "field_layout.hpp"
#include "field.hpp"

#ifndef FIELD_BINARY_IO_HPP
#define FIELD_BINARY_IO_HPP

/* could this be a boolean somehow?*/
enum field_representation: bool {
    REAL = true,
    COMPLEX = false};

template <typename rnumber>
constexpr MPI_Datatype mpi_type(
        field_representation fr)
{
    return ((fr == REAL) ?
            mpi_real_type<rnumber>::real() :
            mpi_real_type<rnumber>::complex());
}

template <typename rnumber, field_representation fr, field_components fc>
class field_binary_IO: public field_layout<fc>
{
    private:
        MPI_Comm io_comm;
        int io_comm_myrank, io_comm_nprocs;
        MPI_Datatype mpi_array_dtype;
    public:
        /* methods */
        field_binary_IO(
                const hsize_t *SIZES,
                const hsize_t *SUBSIZES,
                const hsize_t *STARTS,
                const MPI_Comm COMM_TO_USE);
        ~field_binary_IO();

        int read(
                const std::string fname,
                void *buffer);
        int write(
                const std::string fname,
                void *buffer);
};

#endif//FIELD_BINARY_IO_HPP
bfps/cpp/full_code/NSVE_field_stats.cpp (new file, mode 100644)
#include <string>
#include <cmath>
#include "NSVE_field_stats.hpp"
#include "scope_timer.hpp"

template <typename rnumber>
int NSVE_field_stats<rnumber>::initialize(void)
{
    this->postprocess::read_parameters();
    this->vorticity = new field<rnumber, FFTW, THREE>(
            nx, ny, nz,
            this->comm,
            DEFAULT_FFTW_FLAG);
    this->vorticity->real_space_representation = false;
    hid_t parameter_file = H5Fopen(
            (this->simname + std::string(".h5")).c_str(),
            H5F_ACC_RDONLY,
            H5P_DEFAULT);
    if (!H5Lexists(parameter_file, "field_dtype", H5P_DEFAULT))
        this->bin_IO = NULL;
    else
    {
        hid_t dset = H5Dopen(parameter_file, "field_dtype", H5P_DEFAULT);
        hid_t space = H5Dget_space(dset);
        hid_t memtype = H5Dget_type(dset);
        char *string_data = (char*)malloc(256);
        H5Dread(dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, &string_data);
        // check that we're using the correct data type
        // field_dtype SHOULD be something like "<f4", "<f8", ">f4", ">f8"
        // first character is ordering, which is machine specific
        // for the other two I am checking that they have the correct values
        assert(string_data[1] == 'f');
        assert(string_data[2] == '0' + sizeof(rnumber));
        free(string_data);
        H5Sclose(space);
        H5Tclose(memtype);
        H5Dclose(dset);
        this->bin_IO = new field_binary_IO<rnumber, COMPLEX, THREE>(
                this->vorticity->clayout->sizes,
                this->vorticity->clayout->subsizes,
                this->vorticity->clayout->starts,
                this->vorticity->clayout->comm);
    }
    H5Fclose(parameter_file);
    return EXIT_SUCCESS;
}

template <typename rnumber>
int NSVE_field_stats<rnumber>::read_current_cvorticity(void)
{
    this->vorticity->real_space_representation = false;
    if (this->bin_IO != NULL)
    {
        char itername[16];
        sprintf(itername, "i%.5x", this->iteration);
        std::string native_binary_fname = (
                this->simname +
                std::string("_cvorticity_") +
                std::string(itername));
        this->bin_IO->read(
                native_binary_fname,
                this->vorticity->get_cdata());
    }
    else
    {
        this->vorticity->io(
                this->simname + std::string("_fields.h5"),
                "vorticity",
                this->iteration,
                true);
    }
    return EXIT_SUCCESS;
}

template <typename rnumber>
int NSVE_field_stats<rnumber>::finalize(void)
{
    if (this->bin_IO != NULL)
        delete this->bin_IO;
    delete this->vorticity;
    return EXIT_SUCCESS;
}

template <typename rnumber>
int NSVE_field_stats<rnumber>::work_on_current_iteration(void)
{
    DEBUG_MSG("entered NSVE_field_stats::work_on_current_iteration\n");
    return EXIT_SUCCESS;
}

template class NSVE_field_stats<float>;
template class NSVE_field_stats<double>;
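
(Usage sketch, not from the commit: the initialize / read / work / finalize sequence follows the code above; the simulation name "test" and the presence of its parameter and field files on disk are assumptions.)

    // sketch: run the (currently trivial) per-iteration work for one snapshot
    #include <mpi.h>
    #include <string>
    #include "full_code/NSVE_field_stats.hpp"

    int main(int argc, char *argv[])
    {
        MPI_Init(&argc, &argv);
        {
            // assumes a simulation "test" with test.h5 and its field files present
            NSVE_field_stats<float> stats(MPI_COMM_WORLD, std::string("test"));
            stats.initialize();               // reads parameters, allocates vorticity
            stats.read_current_cvorticity();  // binary or HDF5 path, as detected above
            stats.work_on_current_iteration();
            stats.finalize();
        }
        MPI_Finalize();
        return 0;
    }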
bfps/cpp/full_code/NSVE_field_stats.hpp (new file, mode 100644)
/**********************************************************************
* *
* Copyright 2017 Max Planck Institute *
* for Dynamics and Self-Organization *
* *
* This file is part of bfps. *
* *
* bfps is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* bfps is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with bfps. If not, see <http://www.gnu.org/licenses/> *
* *
* Contact: Cristian.Lalescu@ds.mpg.de *
* *
**********************************************************************/
#ifndef NSVE_FIELD_STATS_HPP
#define NSVE_FIELD_STATS_HPP

#include <cstdlib>
#include <sys/types.h>
#include <sys/stat.h>
#include <vector>
#include "base.hpp"
#include "field.hpp"
#include "field_binary_IO.hpp"
#include "full_code/postprocess.hpp"

template <typename rnumber>
class NSVE_field_stats: public postprocess
{
    private:
        field_binary_IO<rnumber, COMPLEX, THREE> *bin_IO;
    public:
        field<rnumber, FFTW, THREE> *vorticity;

        NSVE_field_stats(
                const MPI_Comm COMMUNICATOR,
                const std::string &simulation_name):
            postprocess(
                    COMMUNICATOR,
                    simulation_name){}
        virtual ~NSVE_field_stats(){}

        virtual int initialize(void);
        virtual int work_on_current_iteration(void);
        virtual int finalize(void);

        int read_current_cvorticity(void);
};

#endif//NSVE_FIELD_STATS_HPP
bfps/cpp/full_code/code_base.cpp (new file, mode 100644)
#include "code_base.hpp"
#include "scope_timer.hpp"

code_base::code_base(
        const MPI_Comm COMMUNICATOR,
        const std::string &simulation_name):
    comm(COMMUNICATOR),
    simname(simulation_name)
{
    MPI_Comm_rank(this->comm, &this->myrank);
    MPI_Comm_size(this->comm, &this->nprocs);
    this->stop_code_now = false;
}

int code_base::check_stopping_condition(void)
{
    if (myrank == 0)
    {
        std::string fname = (
                std::string("stop_") +
                std::string(this->simname));
        {
            struct stat file_buffer;
            this->stop_code_now = (stat(fname.c_str(), &file_buffer) == 0);
        }
    }
    MPI_Bcast(
            &this->stop_code_now,
            1,
            MPI_C_BOOL,
            0,
            MPI_COMM_WORLD);
    return EXIT_SUCCESS;
}
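
(Sketch, not from the commit: check_stopping_condition() reports true once a file named "stop_<simname>" exists in the working directory, so a user can request a clean shutdown with something like `touch stop_test`. The standalone helper below restates that mechanism outside the class; the function name is made up.)

    // sketch: the stop-file check in isolation, mirroring code_base above
    #include <mpi.h>
    #include <sys/stat.h>
    #include <string>

    bool stop_requested(const std::string &simname, MPI_Comm comm)
    {
        int rank;
        MPI_Comm_rank(comm, &rank);
        bool stop_now = false;
        if (rank == 0)
        {
            struct stat file_buffer;
            // true if "stop_<simname>" exists in the working directory
            stop_now = (stat(("stop_" + simname).c_str(), &file_buffer) == 0);
        }
        // broadcast so every rank agrees on the decision
        MPI_Bcast(&stop_now, 1, MPI_C_BOOL, 0, comm);
        return stop_now;
    }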
bfps/cpp/full_code/code_base.hpp (new file, mode 100644)
/**********************************************************************
* *
* Copyright 2017 Max Planck Institute *
* for Dynamics and Self-Organization *
* *
* This file is part of bfps. *
* *
* bfps is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* bfps is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with bfps. If not, see <http://www.gnu.org/licenses/> *
* *