diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c2771c2c701b4566584eb13966a6967aed56767b..8fbf29c00147727a94e2df4edb073142b60ddef5 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,6 +1,7 @@
 stages:
     - build
     - test
+    - deploy
 
 image: gitlab-registry.mpcdf.mpg.de/mpcdf/module-image
 
@@ -87,3 +88,67 @@ test-intel:
         - job: build-intel
           artifacts: true
 
+build-doc:
+    stage: build
+    script:
+      - *load_modules
+      - mkdir build-doc
+      - cd build-doc
+      - module load graphviz doxygen
+      - pip install breathe
+      - yum -y install gd
+      - cmake .. -DNDEBUG=OFF -DTIMING_OUTPUT=OFF
+      - make VERBOSE=1 doc_doxygen
+      - make VERBOSE=1 doc_html
+        #- make VERBOSE=1 doc_latex
+    tags:
+      - docker
+    variables:
+      COMPILER: "gcc"
+      MPI: "impi"
+      MPICXX: "mpigxx"
+    artifacts:
+        paths:
+            - build-doc/
+        when: always
+        expire_in: 12 hrs
+
+        #build-doc-latex:
+        #    stage: test
+        #    image: gitlab-registry.mpcdf.mpg.de/mpcdf/documentation/docs-ci:latest
+        #    script:
+        #        - cd build-doc/sphinx_latex
+        #        - make
+        #        - cd ../..
+        #        - cp build-doc/sphinx_latex/TurTLE.pdf TurTLE_manual.pdf
+        #    needs:
+        #        - job: build-doc
+        #          artifacts: true
+        #    artifacts:
+        #        paths:
+        #            - TurTLE_manual.pdf
+        #        when: always
+        #        expire_in: 12 hrs
+
+pages:
+    stage: deploy
+    dependencies:
+      - build-doc
+    script:
+      - mkdir public
+      - mv build-doc/html public/
+      - mv build-doc/sphinx_html public/
+        #- mv TurTLE_manual.pdf public/
+    artifacts:
+        paths:
+            - public
+    only:
+      - tags
+    tags:
+      - docker
+    needs:
+        - job: build-doc
+          artifacts: true
+          #- job: build-doc-latex
+          #artifacts: true
+
diff --git a/AUTHORS b/AUTHORS
index 721614f97500c408f43db227db571711b76e9036..8e5fa7e98c21cf64e6f3dd5e8ed17f44c3872590 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,11 +1,17 @@
-All people who contributed to TurTLE, in order of the date of their first
-contribution.
-
-Cristian C Lalescu <Cristian.Lalescu@mpcdf.mpg.de>
-Dimitar Vlaykov
-Berenger Bramas
-Debarghya Banerjee
-Jose Agustin Arguedas Leiva
-Markus Rampp
-Tobias Baetge
-Lukas Bentkamp
+- Cristian C Lalescu <Cristian.Lalescu@mpcdf.mpg.de>
+
+- Dimitar Vlaykov
+
+- Berenger Bramas
+
+- Debarghya Banerjee
+
+- Jose Agustin Arguedas Leiva
+
+- Markus Rampp
+
+- Tobias Baetge
+
+- Lukas Bentkamp
+
+- Michael Wilczek
diff --git a/README.rst b/README.rst
index 2845db4b02aeef99884371b631b2b17a18ffb65f..e36b0d29b5caf04868c02a0369448d77f5ee53b9 100644
--- a/README.rst
+++ b/README.rst
@@ -1,5 +1,5 @@
-.. image:: https://gitlab.mpcdf.mpg.de/clalescu/turtle/badges/develop/pipeline.svg
-    :target: https://gitlab.mpcdf.mpg.de/clalescu/turtle/commits/develop
+.. image:: https://gitlab.mpcdf.mpg.de/TurTLE/turtle/badges/develop/pipeline.svg
+    :target: https://gitlab.mpcdf.mpg.de/TurTLE/turtle/commits/develop
 
 =========================================
 Turbulence Tools: Lagrangian and Eulerian
@@ -35,6 +35,25 @@ reducing the time spent on setting up and supervising ensembles of DNS,
 with the added benefit of a unified launch-postprocess approach through
 the use of the Python wrapper.
 
+----------------------------
+Background and documentation
+----------------------------
+
+Partial documentation is available at
+http://TurTLE.pages.mpcdf.de/turtle/sphinx_html; the C++ API is
+described at http://TurTLE.pages.mpcdf.de/turtle/html
+(`doxygen`-generated).
+
+Authors
+-------
+
+TurTLE is developed and maintained by the Wilczek group at the Max
+Planck Institute for Dynamics and Self-Organization and the University
+of Bayreuth, in collaboration with the Application Support Group of the
+Max Planck Computing and Data Facility.
+
+TurTLE contains contributions from:
+
+    .. include:: AUTHORS
+
 .. _sec-installation:
 
 ------------
@@ -51,6 +70,7 @@ The Python 3 package may be installed directly if only post-processing
 of existing data is desired:
 
 .. code:: bash
+
     bash configure_python.sh
     python setup.py install
 
@@ -62,47 +82,19 @@ of existing data is desired:
 The C++ library requires a number of dependencies, that `CMake` will
 search for before compilation and installation.
 In brief, an MPI compiler is required, as well as the HDF5 C library
-compiled with parallel support and FFTW >= 3.4.
-Detailed instructions are provided at the end of this document.
-
--------------
-Documentation
--------------
-
-Partial documentation is available (link to online documentation coming
-soon).
-Doxygen is used for the C++ source code, and the `Sphinx` and `breathe`
-Python packages are used to merge the Python wrapper documentation with
-the C++ documentation.
-The optional `CMake` targets `doc_doxygen`, `doc_html` and `doc_latex`
-generate the required documents in the build directory.
-
---------
-Comments
---------
-
-* the `cmake` folder contains files extracted from
-  https://gitlab.inria.fr/solverstack/morse_cmake, a separate project licensed
-  under the "CeCILL-C" license, please see
-  http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html for
-  details.
-
-* particles: initialization of multistep solvers is done with lower
-  order methods, so direct convergence tests will fail.
-
-* code is used mainly with Python 3.5 and later, and it is not tested at
-  all with Python 2.x
-
------------------
-Full installation
------------------
+compiled with parallel support and FFTW >= 3.3.4.
+We provide instructions for local compilation of FFTW and HDF5, because
+the default versions packaged by Linux distributions are typically not
+configured adequately.
 
 These installation steps assume that you have a working MPI compiler,
 properly configured on your system (i.e. the various configure scripts
 are able to find it), as well as the `cmake` tool.
 The list is a work in progress, please contact us
-(Cristian.Lalescu@ds.mpg.de) if the procedure fails at any step of the
+(Cristian.Lalescu@mpcdf.mpg.de) if the procedure fails at any step of the
 process.
+We recommend reading the instructions in full before starting to
+execute the individual steps.
 
 *Note*: the HDF5 1.8x library may require MPI standard v1, we haven't tested
 this in detail. Some default OpenMPI3 installations will not work.
@@ -110,6 +102,21 @@ this in detail. Some default OpenMPI3 installations will not work.
 Detailed full installation instructions:
 
 1.
+    Optional.
+    Make a directory `PREFIX` on a local fast partition.
+    (Under Unix systems, you can probably use `~/.local`).
+
+2.
+    Optional.
+    We recommend the creation of a virtual python3 environment that will be
+    used for installing TurTLE.
+    Please see https://docs.python-guide.org/dev/virtualenvs/.
+    In the following, the install location of this environment is
+    denoted `TurTLE_DIR`.
+    In principle `TurTLE_DIR` should be a subdirectory of the previously
+    created `PREFIX` folder.
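+
+    A minimal sketch of this step, assuming the standard `venv` module is
+    available and using a hypothetical folder name (adapt to taste):
+
+    .. code:: bash
+
+        python3 -m venv ${PREFIX}/TurTLE_venv
+        export TurTLE_DIR=${PREFIX}/TurTLE_venv
+        source ${TurTLE_DIR}/bin/activate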
+
+3.
     Export the following environment variables with apropriate values:
      * `MPICXX`               - Preferred MPI C++ compiler
      * `FFTW_DIR`             - Base directory of FFTW
@@ -117,70 +124,48 @@ Detailed full installation instructions:
      * `FFTW_LIBDIR`          - Directory containing FFTW library files
      * `FFTW_OPENMP_LIBDIR`   - Directory containing OpenMP FFTW library files (please define if different from FFTW_LIBDIR)
      * `FFTW_MPI_LIBDIR`      - Directory containing MPI FFTW library files (please define if different from FFTW_LIBDIR)
-     * `HDF5_ROOT`            - Base directory of HDF5
-     * `CMAKE_PREFIX_PATH`    - Directory containing local cmake configuration files
-     * `PINCHECK_ROOT`        - Directory under which
-       `include/pincheck.hpp` may be found (OPTIONAL)
-
-    We also recommend that an environment variable `TURTLE_COMPILATION_FLAGS`
-    is defined appropriately.
-    In particular, for clusters of unknown architecture it helps to log into
-    individual nodes and run the following command:
-
-    .. code:: bash
-
-        gcc -march=native -Q --help=target
-
-    Note that an example file `example.bashrc` is included in the
-    repository.
-    There is at least one recorded case of a cluster where different
-    FFTW libraries (serial, MPI and OpenMP) were located in different
-    folders, hence the options of specifying the details.
-    See also lines 143 through 160 of `CMakeLists.txt`.
-
-    *If using openmpi* you may need to recompile it with the
-    `--enable-mpi1-compatibility` flag, in order for HDF5 to compile and
-    link (step 4).
 
-    *PINCHECK* is available from https://gitlab.mpcdf.mpg.de/khr/pincheck
-
-2.
-    Make directory PREFIX on a local fast partition.
-
-3.
-    Download, compile, install FFTW (latest version 3.x from http://www.fftw.org/).
+    If you do not have FFTW installed already, then download, compile, and install FFTW
+    (latest version 3.x from http://www.fftw.org/).
     Execute the following commands in order, feel free to customize
     optimisation flags for your own computer (see http://www.fftw.org/fftw3_doc/Installation-on-Unix.html):
 
     .. code:: bash
 
-        ./configure --prefix=PREFIX --enable-float --enable-sse --enable-mpi --enable-openmp --enable-threads
+        ./configure --prefix=FFTW_DIR --enable-float --enable-sse --enable-mpi --enable-openmp --enable-threads
         make
         make install
-        ./configure --prefix=PREFIX  --enable-sse2 --enable-avx512 --enable-mpi --enable-openmp --enable-threads
+        ./configure --prefix=FFTW_DIR  --enable-sse2 --enable-avx512 --enable-mpi --enable-openmp --enable-threads
         make
         make install
 
+    In the above instructions, replace `FFTW_DIR` with an appropriate
+    location (for instance it is safe to use the `PREFIX` defined above).
     TurTLE will try to find FFTW using the FindFFTW from the Morse project.
-    If the package is installed in a non standard location, it is recommanded
-    to setup the environment variables: `FFTW_DIR`, `FFTW_INCDIR`,
+    If the package is installed in a non-standard location, you should
+    set up the environment variables listed above: `FFTW_DIR`, `FFTW_INCDIR`,
     `FFTW_LIBDIR`, `FFTW_OPEMMP_LIBDIR` and `FFTW_MPI_LIBDIR`.
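+
+    For example, assuming a single FFTW install location (the path below
+    is a placeholder):
+
+    .. code:: bash
+
+        export FFTW_DIR=/path/to/fftw/install
+        export FFTW_INCDIR=${FFTW_DIR}/include
+        export FFTW_LIBDIR=${FFTW_DIR}/lib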
 
 4.
-    Download, compile, install HDF5.
+    Export the following environment variables with appropriate values:
+     * `MPICXX`               - Preferred MPI C++ compiler
+     * `HDF5_ROOT`            - Base directory of HDF5
+
+    If you do not have HDF5 installed already, then download, compile, and
+    install HDF5.
     We are using parallel I/O, therefore we must use the plain C interface of HDF5:
 
     .. code:: bash
 
-        ./configure --prefix=PREFIX --enable-parallel
+        ./configure --prefix=HDF5_ROOT --enable-parallel
         make
         make install
 
-    TurTLE will try to find HDF5 using the regular FindHDF5.
-    Therefore, if the package is installed in a non standard location, it is recommanded
-    to setup the environment variable: HDF5_ROOT.
+    TurTLE will try to find HDF5 using the regular FindHDF5, which
+    searches system folders, or `HDF5_ROOT`.
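+
+    For example, assuming a non-standard install location (placeholder
+    path):
+
+    .. code:: bash
+
+        export HDF5_ROOT=/path/to/hdf5/install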
 
-5.  TurTLE requires `cmake` version > 3.12.
+5.  TurTLE requires `cmake` version > 3.12, which should be available
+    from your default package manager.
     If required, download, compile and install cmake, currently
     available at https://cmake.org/cmake/resources/software.html.
 
@@ -190,33 +175,85 @@ Detailed full installation instructions:
         make
         make install
 
-5.
-    Optional.
-    We recommend the creation of a virtual python3 environment (also
-    under PREFIX) that will be used for installing TurTLE and
-    dependencies.
-    Please see https://docs.python-guide.org/dev/virtualenvs/.
+    The value of `PREFIX` used above only determines where the `cmake`
+    binary is installed (it can be found under `${PREFIX}/bin` after
+    installation).
 
 6.
     Clone turtle repository.
 
     .. code:: bash
 
-        git clone git@gitlab.mpcdf.mpg.de:clalescu/turtle.git
+        git clone git@gitlab.mpcdf.mpg.de:TurTLE/turtle.git
 
 7.
-    Copy the `pc_host_info.py` file to `host_info.py` file and edit according to
-    the instructions in the file. This must be done before installing on a
-    cluster.
-
-8.
     Go into TurTLE repository, execute
 
     .. code:: bash
 
         mkdir build
         cd build
-        cmake .. -DCMAKE_INSTALL_PREFIX=INSTALL_DIR
+        cp ../pc_host_info.py ./host_info.py
+
+    Edit the `host_info.py` file according to the instructions in the file.
+    This is strictly required before installing on a cluster.
+
+8.
+    Optional.
+    *PINCHECK* is available from https://gitlab.mpcdf.mpg.de/khr/pincheck.
+    If you'd like to check whether TurTLE MPI processes and OpenMP
+    threads are pinned properly to hardware threads, you can simply
+    place the pincheck headers under `${PREFIX}/include`.
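+
+    A possible sketch of this step (the source path is a placeholder for
+    wherever you keep the pincheck headers):
+
+    .. code:: bash
+
+        cp /path/to/pincheck/include/pincheck.hpp ${PREFIX}/include/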
+
+9.
+    In the build folder, edit the file `bash_setup_for_TurTLE.sh` so that it
+    exports the following environment variables with appropriate values:
+     * `MPICXX`               - Preferred MPI C++ compiler
+     * `FFTW_DIR`             - Base directory of FFTW
+     * `FFTW_INCDIR`          - Directory containing FFTW header files
+     * `FFTW_LIBDIR`          - Directory containing FFTW library files
+     * `FFTW_OPENMP_LIBDIR`   - Directory containing OpenMP FFTW library files (please define if different from FFTW_LIBDIR)
+     * `FFTW_MPI_LIBDIR`      - Directory containing MPI FFTW library files (please define if different from FFTW_LIBDIR)
+     * `HDF5_ROOT`            - Base directory of HDF5
+     * `PINCHECK_ROOT`        - Directory under which
+       `include/pincheck.hpp` may be found (OPTIONAL)
+     * `TurTLE_DIR`           - Directory under which TurTLE will be installed (same value from step 2).
+       I.e. library will go under `${TurTLE_DIR}/lib`, headers under
+       `${TurTLE_DIR}/include`, etc.
+    This script should also contain the line
+    `source TurTLE_DIR/lib/bash_setup_for_TurTLE.sh`, as well as the
+    line `source TurTLE_DIR/bin/activate`.
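+
+    A minimal sketch of such a script (all values are placeholders that
+    must be adapted to your system):
+
+    .. code:: bash
+
+        export MPICXX=mpicxx
+        export FFTW_DIR=${HOME}/.local
+        export FFTW_INCDIR=${FFTW_DIR}/include
+        export FFTW_LIBDIR=${FFTW_DIR}/lib
+        export HDF5_ROOT=${HOME}/.local
+        export TurTLE_DIR=${HOME}/.local/TurTLE_venv
+        source ${TurTLE_DIR}/bin/activate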
+
+    We also recommend that an environment variable `TURTLE_COMPILATION_FLAGS`
+    is defined appropriately.
+    In particular, for clusters of unknown architecture it helps to log into
+    individual nodes and run the following command:
+
+    .. code:: bash
+
+        gcc -march=native -Q --help=target
+
+    Note that an example file `example.bashrc` is included in the
+    repository.
+    There is at least one recorded case of a cluster where different
+    FFTW libraries (serial, MPI and OpenMP) were located in different
+    folders, hence the options of specifying the details.
+    See also lines 143 through 160 of `CMakeLists.txt`.
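+
+    For example (a generic guess that should be adapted based on the
+    `gcc` output above):
+
+    .. code:: bash
+
+        export TURTLE_COMPILATION_FLAGS="-march=native"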
+
+    *If using openmpi* you may need to recompile openmpi with the
+    `--enable-mpi1-compatibility` flag, in order for HDF5 to compile and
+    link (step 4).
+
+    *Note*: in principle it is possible to add this information to your
+    `.bashrc`, but we recommend against it.
+
+10.
+    In the previously created build folder, execute
+
+    .. code:: bash
+
+        source bash_setup_for_TurTLE.sh
+        cmake .. -DCMAKE_INSTALL_PREFIX=${TurTLE_DIR}
         make
         # to get a verbose compilation process, use
         # VERBOSE=1 make
@@ -225,62 +262,103 @@ Detailed full installation instructions:
         # (where N is the number of available cores)
         make install
 
-9.
-    If you used a custom install location (i.e. `CMAKE_INSTALL_PREFIX`)
-    you must include this location in the environment variable
-    `CMAKE_PREFIX_PATH`.
-    This ensures that the required `TurTLEConfig.cmake` file is accessible for
-    future use by the package.
-    Alternatively you may set the `TurTLE_DIR` environment variable to
-    include the location instead.
-
-10.
+11.
     Using TurTLE from an external project.
     TurTLE creates and installs 3 files alongside the C++ headers and
     library:
 
     .. code:: bash
 
-        -- Installing: install/lib/TurTLEConfig.cmake
-        -- Installing: install/lib/TurTLE_EXPORT.cmake
-        -- Installing: install/lib/TurTLE_EXPORT-noconfig.cmake
+        TurTLEConfig.cmake
+        TurTLE_EXPORT.cmake
+        TurTLE_EXPORT-noconfig.cmake
 
+    These files are installed under `${TurTLE_DIR}/lib`.
     In case these files provide incomplete information, it is necessary to update
-    the cmake input config file: turtle/cmake/TurTLEConfig.cmake.in.
+    the cmake input config file: `turtle/cmake/TurTLEConfig.cmake.in`.
 
-11.
-    Optional: building for multiple target systems.
-    When creating several different build folders for different targets,
-    it is recommended to create build-specific bash settings in a file
-    named `bash_setup_for_TurTLE.sh`, as well as a build-specific copy
-    of the `host_info.py` file.
-    In the `extra_slurm_lines` list from `host_info` one should then add a
+12.
+    For clusters.
+    In the `extra_slurm_lines` list from `host_info.py` one should add a
     corresponding `source INSTALL_DIR/lib/bash_setup_for_TurTLE.sh`
-    line, such that the appropriate setup is loaded in the job script.
+    line (where you should write the explicit install folder instead of
+    `INSTALL_DIR`), such that the appropriate setup is loaded in the job
+    script.
 
---------------------
-Scaling tests.
---------------------
+**Uninstall**
 
-Initial scaling data is available at (url coming soon).
-Please copy the files to the location `TURTLE_FIELD_DATABASE`.
+If you installed TurTLE in a virtual environment, you may simply remove
+the virtual environment.
 
-Separately, please recompile TurTLE with the `TIMING_OUTPUT` cmake
-option switched to `ON`.
+If you installed TurTLE in a default Python location, then you should
+navigate to the corresponding `site-packages` folder, and manually
+remove all folders/files containing "TurTLE" in their name.
+On Linux systems Python will typically use something like
+`/usr/lib/python3/dist-packages` or
+`~/.local/lib/python3.x/site-packages` (you should be able to find all
+relevant locations in the `sys.path` list).
+This also applies if you used a virtual environment, but you'd like to
+clean it for any reason.
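+
+If in doubt, the following one-liner prints the locations that Python
+searches for installed packages:
+
+.. code:: bash
+
+    python3 -c "import sys; print(sys.path)"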
 
-Afterwards, please run variations of the following command:
+**Documentation**
 
-    .. code:: bash
+The documentation can also be built locally if necessary.
+Doxygen is used for the C++ source code, and the `Sphinx` and `breathe`
+Python packages are used to merge the Python wrapper documentation with
+the C++ documentation.
+The optional `CMake` targets `doc_doxygen`, `doc_html` and `doc_latex`
+generate the required documents in the build directory.
+As long as the full cmake installation is possible (see the instructions
+above), one would proceed as follows to generate the documentation locally:
+
+.. code:: bash
+
+    mkdir build-doc
+    cd build-doc
+    cmake ..
+    make doc_doxygen
+    make doc_html
+    cd sphinx_latex
+    make
+
+After these steps, the (HTML) manual is available under
+`build-doc/sphinx_html/index.html`, and a PDF version of the manual can
+be found at `build-doc/sphinx_latex/TurTLE.pdf`.
+The stand-alone `doxygen`-generated documentation is present at
+`build-doc/html/index.html`.
 
-        python ${TURTLE_REPOSITORY}/tests/DNS/test_scaling.py D \
-            -n 128 \
-            --nprocesses 4 \
-            --ncores 1 \
-            --src-wd ${TURTLE_FIELD_DATABASE} \
-            --src-iteration 8192
 
-Available iterations for
+---------------------
+Reference publication
+---------------------
 
-    * n = 128: 8192
+Please see https://arxiv.org/abs/2107.01104 for a description of TurTLE,
+as well as a detailed discussion of the novel particle tracking
+approach.
+This is also the publication to cite in works that make use of TurTLE.
 
+-------
+Contact
+-------
+
+If you have any questions, comments or suggestions, please contact
+Dr. Cristian C. Lalescu (Cristian.Lalescu@mpcdf.mpg.de).
+
+--------
+Comments
+--------
+
+* the `cmake` folder contains files extracted from
+  https://gitlab.inria.fr/solverstack/morse_cmake, a separate project licensed
+  under the "CeCILL-C" license, please see
+  http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html for
+  details.
+
+* particles: initialization of multistep solvers is done with lower
+  order methods, so direct convergence tests will fail.
+
+* code is only known to work with HDF5 1.8.x.
+
+* code is used mainly with Python 3.5 and later, and it is not tested at
+  all with Python 2.x
 
diff --git a/TurTLE/DNS.py b/TurTLE/DNS.py
index be35485a016440b40a3220f6dba9fa85e2d172d0..c69dfac3fb19121a318f27f5a787c5b81513e791 100644
--- a/TurTLE/DNS.py
+++ b/TurTLE/DNS.py
@@ -113,27 +113,49 @@ class DNS(_code):
     def generate_default_parameters(self):
         # these parameters are relevant for all DNS classes
         self.parameters['fftw_plan_rigor'] = 'FFTW_ESTIMATE'
+        self.parameter_description['fftw_plan_rigor'] = 'FFTW plan rigor to use. One of `FFTW_ESTIMATE`, `FFTW_MEASURE`, `FFTW_PATIENT`. Please see FFTW documentation.'
         self.parameters['dealias_type'] = int(1)
+        self.parameter_description['dealias_type'] = 'Dealiasing method to use, integer. Options are: two-thirds (0) or smooth (1).'
         self.parameters['dkx'] = float(1.0)
+        self.parameter_description['dkx'] = 'Smallest wavenumber in the x direction for a pseudo-spectral run.'
         self.parameters['dky'] = float(1.0)
+        self.parameter_description['dky'] = 'Smallest wavenumber in the y direction for a pseudo-spectral run.'
         self.parameters['dkz'] = float(1.0)
+        self.parameter_description['dkz'] = 'Smallest wavenumber in the z direction for a pseudo-spectral run.'
         self.parameters['niter_todo'] = int(8)
+        self.parameter_description['niter_todo'] = 'Number of iterations to compute during a single run.'
         self.parameters['niter_stat'] = int(1)
+        self.parameter_description['niter_stat'] = 'Field statistics are computed (i.e. `do_stats` is called) every `NITER_STAT` iterations.'
         self.parameters['niter_out'] = int(8)
+        self.parameter_description['niter_out'] = 'Output is performed every `NITER_OUT` iterations.'
         self.parameters['checkpoints_per_file'] = int(1)
+        self.parameter_description['checkpoints_per_file'] = 'Number of checkpoints to store in a single checkpoint file. Rule of thumb: files should hold gigabytes of data, rather than megabytes.'
         self.parameters['dt'] = float(0.01)
+        self.parameter_description['dt'] = 'Fixed timestep to use. It is strongly recommended not to change this value in between jobs.'
         self.parameters['nu'] = float(0.1)
+        self.parameter_description['nu'] = 'Viscosity value used in the equations, given in code units.'
         self.parameters['fmode'] = int(1)
+        self.parameter_description['fmode'] = 'Forcing parameter: mode to use for the Kolmogorov forcing.'
         self.parameters['famplitude'] = float(0.5)
+        self.parameter_description['famplitude'] = 'Forcing parameter: amplitude of Kolmogorov forcing, in code units.'
         self.parameters['friction_coefficient'] = float(0.5)
+        self.parameter_description['friction_coefficient'] = 'Forcing parameter: drag coefficient, in code units.'
         self.parameters['energy'] = float(0.5)
+        self.parameter_description['energy'] = 'Forcing parameter: if fluid is forced by enforcing a constant energy, this is the value (in code units).'
         self.parameters['injection_rate'] = float(0.4)
+        self.parameter_description['injection_rate'] = 'Forcing parameter: if a fixed energy injection rate is used, this is the value (in code units).'
         self.parameters['fk0'] = float(2.0)
+        self.parameter_description['fk0'] = 'Forcing parameter: if forcing acts on wavenumber band, this is the smallest wavenumber where it acts (in code units).'
         self.parameters['fk1'] = float(4.0)
+        self.parameter_description['fk1'] = 'Forcing parameter: if forcing acts on wavenumber band, this is the largest wavenumber where it acts (in code units).'
         self.parameters['forcing_type'] = 'fixed_energy_injection_rate'
+        self.parameter_description['forcing_type'] = 'Forcing parameter: what type of force to use.'
         self.parameters['histogram_bins'] = int(256)
+        self.parameter_description['histogram_bins'] = 'During statistics, histograms of real-valued fields are computed using a number of `HISTOGRAM_BINS` bins.'
         self.parameters['max_velocity_estimate'] = float(1)
+        self.parameter_description['max_velocity_estimate'] = 'During statistics, velocity histogram bins are computed using this estimate (see code for details).'
         self.parameters['max_vorticity_estimate'] = float(1)
+        self.parameter_description['max_vorticity_estimate'] = 'During statistics, vorticity histogram bins are computed using this estimate (see code for details).'
         # parameters specific to particle version
         self.NSVEp_extra_parameters = {}
         self.NSVEp_extra_parameters['niter_part'] = int(1)
diff --git a/TurTLE/_base.py b/TurTLE/_base.py
index 5a35f9b0f6f92c7c49a9740f537b597fe241dd98..1275df7efa1df2e340e4ae38040a456c02e53de0 100644
--- a/TurTLE/_base.py
+++ b/TurTLE/_base.py
@@ -42,6 +42,11 @@ class _base(object):
         self.parameters = {'nx' : 32,
                            'ny' : 32,
                            'nz' : 32}
+        self.parameter_description = {
+                'nx' : 'Number of real-space grid nodes in the x direction.',
+                'ny' : 'Number of real-space grid nodes in the y direction.',
+                'nz' : 'Number of real-space grid nodes in the z direction.',
+                }
         self.string_length = 512
         self.work_dir = os.path.realpath(work_dir)
         self.simname = simname
@@ -329,14 +334,22 @@ class _base(object):
     def parameters_to_parser_arguments(
             self,
             parser,
-            parameters = None):
+            parameters = None,
+            parameter_description = None):
         if type(parameters) == type(None):
             parameters = self.parameters
+        if type(parameter_description) == type(None):
+            parameter_description = self.parameter_description
         for k in sorted(parameters.keys()):
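+            # use the parameter description, when available, as argparse help text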
+            if k in parameter_description.keys():
+                description = parameter_description[k]
+            else:
+                description = 'No description available.'
             parser.add_argument(
                     '--{0}'.format(k),
                     type = type(parameters[k]),
                     dest = k,
+                    help = description,
                     default = None)
         return None
 
diff --git a/cpp/env_utils.hpp b/cpp/env_utils.hpp
index 7530c2a6a11fc6292de57de3d7411fb5d8d9a687..f030c7c8a4fec908fff0b2f7514ead49d151c416 100644
--- a/cpp/env_utils.hpp
+++ b/cpp/env_utils.hpp
@@ -34,7 +34,9 @@
 #include <cstring>
 #include <array>
 
-
+/** \class env_utils
+ * \brief Utilities for parsing environment variables.
+ */
 class env_utils {
     template <class VariableType>
     static const VariableType StrToOther(const char* const str, const VariableType& defaultValue = VariableType()){
diff --git a/cpp/field_binary_IO.hpp b/cpp/field_binary_IO.hpp
index 66234ab39f7e0a1172c1c80ad464c33ab4a8d8e5..0742a2cb0408ea5aee2022aca02d62104fdfea13 100644
--- a/cpp/field_binary_IO.hpp
+++ b/cpp/field_binary_IO.hpp
@@ -49,6 +49,14 @@ constexpr MPI_Datatype mpi_type(
             mpi_real_type<rnumber>::complex());
 }
 
+/** \class field_binary_IO
+ * \brief A class to handle binary field IO
+ *
+ * \tparam rnumber field data type
+ * \tparam fr field representation (REAL or COMPLEX)
+ * \tparam fc number of field components
+ */
+
 template <typename rnumber, field_representation fr, field_components fc>
 class field_binary_IO:public field_layout<fc>
 {
diff --git a/cpp/kspace.cpp b/cpp/kspace.cpp
index 16ae51cfde7b6c825790e225289dfe402f307c2b..921e498de2330bc64b97fc76ccd3492955448ede 100644
--- a/cpp/kspace.cpp
+++ b/cpp/kspace.cpp
@@ -343,6 +343,10 @@ void kspace<be, dt>::general_M_filter(
 
 
 /** \brief Filter a field using a Gaussian kernel.
+ *
+ * \tparam rnumber type of real number, float or double.
+ * \tparam fc field components, ONE, THREE or THREExTHREE.
+ * \return nothing
  *
  *  Filter's mathematical expression in Fourier space is as follows:
  *  \f[
diff --git a/cpp/kspace.hpp b/cpp/kspace.hpp
index f082f11ed9f73e9dd5b5ba9f11ed0d17a060f777..656b8ca290b32a9cf87bbdb0305ac7e8a2b7fe43 100644
--- a/cpp/kspace.hpp
+++ b/cpp/kspace.hpp
@@ -38,19 +38,20 @@
 enum field_backend {FFTW};
 enum kspace_dealias_type {ONE_HALF, TWO_THIRDS, SMOOTH};
 
-/** \brief A class for handling Fourier representation tasks.
+/** \class kspace
+ *  \brief A class for handling Fourier representation tasks.
  *
- *      - contains wavenumber information (specific to each MPI process).
- *      This includes values of kx, ky, kz, including lowest modes dkx etc,
- *      as well as number of modes within sferical shells and mean wavenumber
- *      within shells.
- *      - has methods for spectrum computation and similar.
- *      - has methods for filtering.
- *      - has CLOOP methods, useful for computing arbitrary formulas over the
- *      Fourier space grid (i.e. use lambda expressions).
+ *  \tparam be field backend, currently only FFTW is possible.
+ *  \tparam dt dealiasing mode, either ONE_HALF, TWO_THIRDS or SMOOTH.
  *
- *      \tparam be field backend, currently only FFTW is possible.
- *      \tparam dt dealiasing mode, either ONE_HALF, TWO_THIRDS or SMOOTH.
+ *  Contains wavenumber information (specific to each MPI process).
+ *  This includes the values of kx, ky, kz, including the lowest modes dkx etc.,
+ *  as well as the number of modes within spherical shells and the mean
+ *  wavenumber within shells.
+ *  - has methods for spectrum computation and similar.
+ *  - has methods for filtering.
+ *  - has CLOOP methods, useful for computing arbitrary formulas over the
+ *    Fourier space grid (i.e. use lambda expressions).
  */
 template <field_backend be,
           kspace_dealias_type dt>
@@ -87,10 +88,7 @@ class kspace
                 typename fftw_interface<rnumber>::complex *__restrict__ a,
                 const double kmax);
 
-        /**
-         * \tparam rnumber type of real number, float or double.
-         * \tparam fc field components, ONE, THREE or THREExTHREE.
-         */
+
         template <typename rnumber,
                   field_components fc>
         void Gauss_filter(
diff --git a/cpp/particles/p2p/p2p_ghost_collisions.hpp b/cpp/particles/p2p/p2p_ghost_collisions.hpp
index 5602485397e1f54232533b1997431110698dc97e..92a39ff4ab4f3042dd387a40794fe2d4a5a59d61 100644
--- a/cpp/particles/p2p/p2p_ghost_collisions.hpp
+++ b/cpp/particles/p2p/p2p_ghost_collisions.hpp
@@ -63,6 +63,11 @@ class p2p_ghost_collisions
         particle_shape current_particle_shape;
 
     protected:
+        /** \brief Add a pair of colliding particles to the list
+         *
+         * Given a pair of particle IDs, add them as an *ordered pair* to `collision_pairs_local`.
+         */
         void add_colliding_pair(partsize_t idx_part1, partsize_t idx_part2)
         {
             // store colliding particle ids in order, to be able to identify pairs more easily
diff --git a/cpp/scope_timer.hpp b/cpp/scope_timer.hpp
index 9e822e65d43fe2942a2c23d0fdd97d6d515d4d87..91365e01695b9a4b577d8863302b9fbed1002a12 100644
--- a/cpp/scope_timer.hpp
+++ b/cpp/scope_timer.hpp
@@ -94,7 +94,7 @@ protected:
         omp_destroy_lock(&m_updateLock);
       }
 
-      /** Add a record */
+      /** \brief Add a record */
       void addRecord(const double inDuration, const bool isTask) {
   #pragma omp atomic update
         m_totalTime += inDuration;
@@ -123,7 +123,8 @@ protected:
         omp_unset_lock(&m_childrenLock);
       }
 
-      //! Must not be called during a paralle execution
+      /** \note Must not be called during a parallel execution
+       */
       const std::vector<CoreEvent*>& getChildren() const {
         assert(omp_in_parallel() == 0);
         return m_children;
diff --git a/cpp/turtle_timer.hpp b/cpp/turtle_timer.hpp
index 0bf74c7c3fb7a3296c543021b7493ba862260ac5..60478d58d3f9d6411402b7d675673d1208c47cca 100644
--- a/cpp/turtle_timer.hpp
+++ b/cpp/turtle_timer.hpp
@@ -27,21 +27,26 @@
 #include <chrono>
 
 /**
-  * @file
+ * @file
  *
  * Each section to measure should be embraced by start/stop.
- * The measured time is given by "getElapsed".
- * The total time measured by a timer is given by "getCumulated".
+ * The measured time is given by `getElapsed`.
+ * The total time measured by a timer is given by `getCumulated`.
  * Example :
- * @code turtle_timer tm; // Implicit start
- * @code ...
- * @code tm.stop(); // stop the timer
- * @code tm.getElapsed(); // return the duration in s [A]
- * @code tm.start(); // restart the timer
- * @code ...
- * @code tm.stopAndGetElapsed(); // stop the timer and return the duraction in s
- * [B]
- * @code tm.getCumulated(); // Equal [A] + [B]
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.cpp}
+ *     turtle_timer tm; // Implicit start
+ *     ...
+ *     tm.stop(); // stop the timer
+ *     tm.getElapsed(); // return the duration in s [A]
+ *     tm.start(); // restart the timer
+ *     ...
+ *     tm.stopAndGetElapsed(); // stop the timer and return the duration in s [B]
+ *     tm.getCumulated(); // Equal [A] + [B]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/** \class turtle_timer
+ *  \brief Timer for measuring elapsed and cumulated durations of code sections.
  */
 class turtle_timer {
     using double_second_time = std::chrono::duration<double, std::ratio<1, 1>>;
@@ -102,3 +107,4 @@ public:
 };
 
 #endif//TURTLE_TIMER_HPP
+
diff --git a/documentation/chapters/AUTHORS b/documentation/chapters/AUTHORS
new file mode 120000
index 0000000000000000000000000000000000000000..f04b7e8a2af221a97d050fdf3e89cb77fe4407ef
--- /dev/null
+++ b/documentation/chapters/AUTHORS
@@ -0,0 +1 @@
+../../AUTHORS
\ No newline at end of file
diff --git a/documentation/chapters/api.rst b/documentation/chapters/api.rst
index 239ec0cac160c9a32fe5fe21d1eb1c208f664142..16b3b3b592d54fc204ea124c45b5e097799f9142 100644
--- a/documentation/chapters/api.rst
+++ b/documentation/chapters/api.rst
@@ -1,6 +1,6 @@
-===
-API
-===
+==========
+Python API
+==========
 
 
 ------------
diff --git a/documentation/chapters/cpp_doxygen.rst b/documentation/chapters/cpp_doxygen.rst
index 51d1c9cca6c637fbce1335a3b8d6c358ddb6a2fd..e1bc1d942c3f2fa5ed1bac65d66bc245326973d6 100644
--- a/documentation/chapters/cpp_doxygen.rst
+++ b/documentation/chapters/cpp_doxygen.rst
@@ -1,33 +1,66 @@
----
-CPP
----
+-----------
+C++ classes
+-----------
+
+
+kspace
+------
 
 .. doxygenclass:: kspace
     :project: TurTLE
     :members:
 
+
+field
+-----
+
 .. doxygenclass:: field
     :project: TurTLE
     :members:
 
+
+code_base
+---------
+
 .. doxygenclass:: code_base
     :project: TurTLE
     :members:
 
+
+direct_numerical_simulation
+---------------------------
+
 .. doxygenclass:: direct_numerical_simulation
     :project: TurTLE
     :members:
 
-.. doxygenclass:: NSVE
-    :project: TurTLE
-    :members:
 
-.. doxygenclass:: particles_distr_mpi
-    :project: TurTLE
-    :path: ...
-    :members: [...]
-    :protected-members:
-    :private-members:
-    :undoc-members:
-    :outline:
-    :no-link:
+..
+    NSE
+    ----
+    
+    .. doxygenclass:: NSE
+        :project: TurTLE
+        :members:
+    
+    
+    NSVE
+    ----
+    
+    .. doxygenclass:: NSVE
+        :project: TurTLE
+        :members:
+    
+    
+    particle_set
+    ------------
+    
+    .. doxygenclass:: particle_set
+        :project: TurTLE
+        :path: ...
+        :members: [...]
+        :protected-members:
+        :private-members:
+        :undoc-members:
+        :outline:
+        :no-link:
diff --git a/documentation/chapters/overview.rst b/documentation/chapters/overview.rst
deleted file mode 100644
index 5df4eb6f40ec72ee5f50c866b72fefefc6046fb1..0000000000000000000000000000000000000000
--- a/documentation/chapters/overview.rst
+++ /dev/null
@@ -1,271 +0,0 @@
-=====================
-Overview and Tutorial
-=====================
-
-----------------
-General comments
-----------------
-
-The purpose of this code is to run pseudo-spectral DNS of turbulence,
-and integrate particle trajectories in the resulting fields.
-An important aim of the code is to simplify the launching of
-compute jobs and postprocessing, up to and including the generation of
-publication-ready figures.
-
-For research, people routinely write code from scratch because research
-goals change to a point where modifying the previous code is too
-expensive.
-With TurTLE, the desire is to identify core functionality that should be
-implemented in a library.
-The core library can then be used by many problem-specific codes.
-
-In this sense, the structuring of the code-base is non-standard.
-The core functionality is implemented in C++ (classes useful for
-describing working with fields or sets of particles), while a Python3
-wrapper is used for generating "main" programmes to be linked against
-the core library.
-The core library uses a hybrid MPI/OpenMP approach for parallelization, and the Python3 wrapper
-compiles this core library when being installed.
-
-Python3 "wrapper"
------------------
-
-In principle, users of the code should only need to use Python3 for
-launching jobs and postprocessing data.
-
-Classes defined in the Python3 package can be used to generate executable
-codes, compile/launch them, and then for accessing and postprocessing
-data with a full Python3 environment.
-
-Code generation is quite straightforward, with C++ code snippets handled
-as strings in the Python3 code, such that they can be combined in
-different ways.
-
-Once a "main" file has been written, it is compiled and linked against
-the core library.
-Depending on machine-specific settings, the code can then be launched
-directly, or job scripts appropriate for queueing systems are generated
-and submitted.
-
-C++ core library
-----------------
-
-TurTLE has a hierarchy of classes that provide prototypes for three basic tasks: perform a DNS, post-process existing data or test arbitrary functionality.
-As a guiding principle, the distinct tasks and concepts involved in the numerical simulation in TurTLE are isolated as much as possible, which simplifies the development of extensions.
-
-There are two types of simple objects.
-Firstly, an abstract class encapsulates three elements: generic *initialization*, *do work* and *finalization* functionality.
-Secondly, essential data structures (i.e. fields, sets of particles) and associated functionality (i.e. I/O) are provided by "building block"-classes.
-The solver then consists of a specific arrangement of the building blocks.
-
-The heterogeneous TurTLE development team benefits from the separation of generic functionality from building blocks:
-TurTLE is naturally well-suited to the distribution of conceptually distinct work, in particular fully isolating projects such as, e.g., "implement new numerical method" from "optimize the computation of field statistics with OpenMP".
-
----------
-Equations
----------
-
-The code uses a fairly standard pseudo-spectral algorithm to solve fluid
-equations.
-The incompressible Navier Stokes equations in velocity form are as
-follows:
-
-.. math::
-
-    \partial_t \mathbf{u} + \mathbf{u} \cdot \nabla \mathbf{u} =
-    - \nabla p + \nu \Delta \mathbf{u} + \mathbf{f}
-
-In fact, the code solves the vorticity formulation of these equations:
-
-.. math::
-    \partial_t \mathbf{\omega} +
-    \mathbf{u} \cdot \nabla \mathbf{\omega} =
-    \mathbf{\omega} \cdot \nabla \mathbf{u} +
-    \nu \Delta \mathbf{\omega} + \nabla \times \mathbf{f}
-
-Statistics
-----------
-
-Basic quantities that can be computed in a pseudospectral code are the
-following:
-
-.. math::
-
-    E = \frac{1}{2} \sum_{\mathbf{k}} \hat{\mathbf{u}} \cdot \hat{\mathbf{u}}^*, \hskip .5cm
-    \varepsilon = \nu \sum_{\mathbf{k}} k^2 \hat{\mathbf{u}} \cdot \hat{\mathbf{u}}^*, \hskip .5cm
-    \textrm{in general } \sum_{\mathbf{k}} k^p \hat{u_i} \cdot \hat{u_j}^*, \hskip .5cm
-    \varepsilon_{\textrm{inj}} = \sum_{\mathbf{k}} \hat{\mathbf{u}} \cdot \hat{\mathbf{f}}^*
-
-
-In fact, C++ code generated by
-:class:`NavierStokes <bfps.NavierStokes.NavierStokes>`
-computes and stores the velocity
-and vorticity cospectra (9 components each):
-
-.. math::
-
-    \sum_{k \leq \|\mathbf{k}\| \leq k+dk}\hat{u_i} \cdot \hat{u_j}^*, \hskip .5cm
-    \sum_{k \leq \|\mathbf{k}\| \leq k+dk}\hat{\omega_i} \cdot \hat{\omega_j}^*
-
-In all honesty, this is overkill for homogenous and isotropic flows, but
-in principle we will look at more complicated flows.
-
-See :func:`compute_statistics <bfps.NavierStokes.NavierStokes.compute_statistics>`
-and
-:func:`compute_time_averages <bfps.NavierStokes.NavierStokes.compute_time_averages>`
-for quantities
-computed in postprocessing by the python code.
-
------------
-Conventions
------------
-
-The C++ backend is based on ``FFTW``, and the Fourier
-representations are *transposed*.
-In brief, this is the way the fields are represented on disk and in
-memory (both in the C++ backend and in Python postprocessing):
-
-    * real space representations of 3D vector fields consist of
-      contiguous arrays, with the shape ``(nz, ny, nx, 3)``:
-      :math:`n_z \times n_y \times n_x` triplets, where :math:`z` is the
-      slowest coordinate, :math:`x` the fastest; each triplet is then
-      the sequence of :math:`x` component, :math:`y` component and
-      :math:`z` component.
-
-    * Fourier space representations of 3D vector fields consist of
-      contiguous arrays, with the shape ``(ny, nz, nx/2+1, 3)``:
-      :math:`k_y` is the slowest coordinate, :math:`k_x` the fastest;
-      each triplet of 3 complex numbers is then the :math:`(x, y, z)`
-      components, as ``FFTW`` requires for the correspondence with the
-      real space representations.
-
-:func:`read_cfield <bfps.NavierStokes.NavierStokes.read_cfield>` will return
-a properly shaped ``numpy.array`` containing a snapshot of the Fourier
-representation of a 3D field.
-
-If you'd like to construct the corresponding wave numbers, you can
-follow this procedure:
-
-.. code:: python
-
-    import numpy as np
-    from bfps import NavierStokes
-
-    c = NavierStokes(
-            work_dir = '/location/of/simulation/data',
-            simname = 'simulation_name_goes_here')
-    df = c.get_data_file()
-    kx = df['kspace/kx'].value
-    ky = df['kspace/ky'].value
-    kz = df['kspace/kz'].value
-    df.close()
-    kval = np.zeros(kz.shape + ky.shape + kx.shape + (3,),
-                    dtype = kx.dtype)
-    kval[..., 0] = kx[None, None, :]
-    kval[..., 1] = ky[:, None, None]
-    kval[..., 2] = kz[None, :, None]
-
-``kval`` will have the same shape as the result of
-:func:`read_cfield <NavierStokes.NavierStokes.read_cfield>`.
-Obviously, the machine being used should have enough RAM to hold the
-field...
-
---------
-Tutorial
---------
-
-First DNS
----------
-
-Installing ``bfps`` is not trivial, and the instructions are in
-:ref:`sec-installation`.
-After installing, you should have a new executable script
-available, called ``bfps``, that you can execute.
-Just executing it will run a small test DNS on a real space grid of size
-:math:`32 \times 32 \times 32`, in the current
-folder, with the simulation name ``test``.
-So, open a console, and type ``bfps DNS NSVE``:
-
-.. code:: bash
-
-    # depending on how curious you are, you may have a look at the
-    # options first:
-    bfps --help
-    bfps DNS --help
-    bfps DNS NSVE --help
-    # or you may just run it:
-    bfps DNS NSVE
-
-The simulation itself should not take more than a few seconds, since
-this is just a :math:`32^3` simulation run for 8 iterations.
-First thing you can do afterwards is open up a python console, and type
-the following:
-
-.. _sec-first-postprocessing:
-
-.. code:: python
-
-    import numpy as np
-    from bfps import DNS
-
-    c = DNS(
-            work_dir = '/location/of/simulation/data',
-            simname = 'simulation_name_goes_here')
-    c.compute_statistics()
-    print ('Rlambda = {0:.0f}, kMeta = {1:.4f}, CFL = {2:.4f}'.format(
-            c.statistics['Rlambda'],
-            c.statistics['kMeta'],
-            (c.parameters['dt']*c.statistics['vel_max'] /
-             (2*np.pi/c.parameters['nx']))))
-    print ('Tint = {0:.4e}, tauK = {1:.4e}'.format(c.statistics['Tint'],
-                                                   c.statistics['tauK']))
-    data_file = c.get_data_file()
-    print ('total time simulated is = {0:.4e} Tint, {1:.4e} tauK'.format(
-            data_file['iteration'].value*c.parameters['dt'] / c.statistics['Tint'],
-            data_file['iteration'].value*c.parameters['dt'] / c.statistics['tauK']))
-
-:func:`compute_statistics <bfps.DNS.DNS.compute_statistics>`
-will read the data
-file generated by the DNS, compute a bunch of basic statistics, for
-example the Taylor scale Reynolds number :math:`R_\lambda` that we're
-printing in the example code.
-
-What happens is that the DNS will have generated an ``HDF5`` file
-containing a bunch of specific datasets (spectra, moments of real space
-representations, etc).
-The function
-:func:`compute_statistics <bfps.DNS.DNS.compute_statistics>`
-performs simple postprocessing that may however be expensive, therefore
-it also saves some data into a ``<simname>_postprocess.h5`` file, and
-then it also performs some time averages, yielding the ``statistics``
-dictionary that is used in the above code.
-
-Behind the scenes
------------------
-
-TODO FIXME obsolete documentation
-
-In brief the following takes place:
-
-    1. An instance ``c`` of
-       :class:`NavierStokes <bfps.NavierStokes.NavierStokes>` is created.
-       It is used to generate an :class:`argparse.ArgumentParser`, and
-       it processes command line arguments given to the ``bfps
-       NavierStokes`` command.
-    2. reasonable DNS parameters are constructed from the command line
-       arguments.
-    3. ``c`` generates a parameter file ``<simname>.h5``, into which the
-       various parameters are written.
-       ``c`` also generates the various datasets that the backend code
-       will write into (statistics and other stuff).
-    4. ``c`` writes a C++ file that is compiled and linked against
-       ``libbfps``.
-    5. ``c`` executes the C++ code using ``mpirun``.
-    6. the C++ code actually performs the DNS, and outputs various
-       results into the ``<simname>.h5`` file.
-
-After the simulation is done, things are simpler.
-In fact, any ``HDF5`` capable software can be used to read the data
-file, and the dataset names should be reasonably easy to interpret, so
-custom postprocessing codes can easily be generated.
-
diff --git a/documentation/chapters/tutorial.rst b/documentation/chapters/tutorial.rst
index f8e78242317b4859aa04664307e4c0c2751613c0..4ee154cc6544a7929546a830984f6ca05f2c667e 100644
--- a/documentation/chapters/tutorial.rst
+++ b/documentation/chapters/tutorial.rst
@@ -1,6 +1,65 @@
-========
-Tutorial
-========
+=====================
+Overview and Tutorial
+=====================
+
+----------------
+General comments
+----------------
+
+The purpose of this code is to run pseudo-spectral DNS of turbulence,
+and integrate particle trajectories in the resulting fields.
+An important aim of the code is to simplify the launching of
+compute jobs and postprocessing, up to and including the generation of
+publication-ready figures.
+
+For research, people routinely write code from scratch because research
+goals change to a point where modifying the previous code is too
+expensive.
+With TurTLE, the desire is to identify core functionality that should be
+implemented in a library.
+The core library can then be used by many problem-specific codes.
+
+In this sense, the structuring of the code-base is non-standard.
+The core functionality is implemented in C++ (classes useful for
+describing working with fields or sets of particles), while a Python3
+wrapper is used for generating "main" programmes to be linked against
+the core library.
+The core library uses a hybrid MPI/OpenMP approach for parallelization, and the Python3 wrapper
+compiles this core library when being installed.
+
+Python3 "wrapper"
+-----------------
+
+In principle, users of the code should only need to use Python3 for
+launching jobs and postprocessing data.
+
+Classes defined in the Python3 package can be used to generate executable
+codes, compile/launch them, and then for accessing and postprocessing
+data with a full Python3 environment.
+
+Code generation is quite straightforward, with C++ code snippets handled
+as strings in the Python3 code, such that they can be combined in
+different ways.
+
+Once a "main" file has been written, it is compiled and linked against
+the core library.
+Depending on machine-specific settings, the code can then be launched
+directly, or job scripts appropriate for queueing systems are generated
+and submitted.
+
+C++ core library
+----------------
+
+TurTLE has a hierarchy of classes that provide prototypes for three basic tasks: perform a DNS, post-process existing data or test arbitrary functionality.
+As a guiding principle, the distinct tasks and concepts involved in the numerical simulation in TurTLE are isolated as much as possible, which simplifies the development of extensions.
+
+There are two types of simple objects.
+Firstly, an abstract class encapsulates three elements: generic *initialization*, *do work* and *finalization* functionality.
+Secondly, essential data structures (i.e. fields, sets of particles) and associated functionality (i.e. I/O) are provided by "building block"-classes.
+The solver then consists of a specific arrangement of the building blocks.
+
+The heterogeneous TurTLE development team benefits from the separation of generic functionality from building blocks:
+TurTLE is naturally well-suited to the distribution of conceptually distinct work, in particular fully isolating projects such as, e.g., "implement new numerical method" from "optimize the computation of field statistics with OpenMP".
 
 ---------
 Equations
@@ -191,3 +250,29 @@ In fact, any ``HDF5`` capable software can be used to read the data
 file, and the dataset names should be reasonably easy to interpret, so
 custom postprocessing codes can easily be generated.
 
+
+--------------------
+Scaling tests.
+--------------------
+
+Initial scaling data is available at (url coming soon).
+Please copy the files to the location `TURTLE_FIELD_DATABASE`.
+
+Separately, please recompile TurTLE with the `TIMING_OUTPUT` cmake
+option switched to `ON`.
+
+Afterwards, please run variations of the following command:
+
+    .. code:: bash
+
+        python ${TURTLE_REPOSITORY}/tests/DNS/test_scaling.py D \
+            -n 128 \
+            --nprocesses 4 \
+            --ncores 1 \
+            --src-wd ${TURTLE_FIELD_DATABASE} \
+            --src-iteration 8192
+
+Available iterations for
+
+    * n = 128: 8192
+
diff --git a/documentation/index.rst b/documentation/index.rst
index fc866054f00a40fcc9c07d1f0adfea7f8da81250..0566f7b4dad43652deef3c17020987a4ae0b896b 100644
--- a/documentation/index.rst
+++ b/documentation/index.rst
@@ -12,11 +12,10 @@ Contents:
     :maxdepth: 4
 
     chapters/README
-    chapters/overview
+    chapters/tutorial
     chapters/development
     chapters/bandpass
     chapters/api
-    chapters/cpp
     chapters/cpp_doxygen