diff --git a/bfps/cpp/base.hpp b/bfps/cpp/base.hpp
index adfdd62f772795269cbcc5241dcb881677e38e72..8fc5942f80106254eef68ba91e3643fa66e02891 100644
--- a/bfps/cpp/base.hpp
+++ b/bfps/cpp/base.hpp
@@ -24,6 +24,7 @@
 
 
 
+#include <cassert>
 #include <mpi.h>
 #include <stdarg.h>
 #include <iostream>
diff --git a/bfps/cpp/full_code/NSVE.cpp b/bfps/cpp/full_code/NSVE.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..826bf0e3d3d9875a727048b6432b92dedd23cd4f
--- /dev/null
+++ b/bfps/cpp/full_code/NSVE.cpp
@@ -0,0 +1,379 @@
+#include <string>
+#include <cmath>
+#include <cstring>
+#include <ctime>
+#include <sys/stat.h>
+
+#include "NSVE.hpp"
+#include "scope_timer.hpp"
+
+template <typename rnumber>
+int NSVE<rnumber>::read_iteration(void)
+{
+    /* read iteration */
+    hid_t dset;
+    hid_t iteration_file = H5Fopen(
+            (this->simname + std::string(".h5")).c_str(),
+            H5F_ACC_RDONLY,
+            H5P_DEFAULT);
+    dset = H5Dopen(
+            iteration_file,
+            "iteration",
+            H5P_DEFAULT);
+    H5Dread(
+            dset,
+            H5T_NATIVE_INT,
+            H5S_ALL,
+            H5S_ALL,
+            H5P_DEFAULT,
+            &this->iteration);
+    H5Dclose(dset);
+    dset = H5Dopen(
+            iteration_file,
+            "checkpoint",
+            H5P_DEFAULT);
+    H5Dread(
+            dset,
+            H5T_NATIVE_INT,
+            H5S_ALL,
+            H5S_ALL,
+            H5P_DEFAULT,
+            &this->checkpoint);
+    H5Dclose(dset);
+    H5Fclose(iteration_file);
+    DEBUG_MSG("simname is %s, iteration is %d and checkpoint is %d\n",
+            this->simname.c_str(),
+            this->iteration,
+            this->checkpoint);
+    return EXIT_SUCCESS;
+}
+
+template <typename rnumber>
+int NSVE<rnumber>::write_iteration(void)
+{
+    if (this->myrank == 0)
+    {
+        hid_t dset = H5Dopen(
+                this->stat_file,
+                "iteration",
+                H5P_DEFAULT);
+        H5Dwrite(
+                dset,
+                H5T_NATIVE_INT,
+                H5S_ALL,
+                H5S_ALL,
+                H5P_DEFAULT,
+                &this->iteration);
+        H5Dclose(dset);
+        dset = H5Dopen(
+                this->stat_file,
+                "checkpoint",
+                H5P_DEFAULT);
+        H5Dwrite(
+                dset,
+                H5T_NATIVE_INT,
+                H5S_ALL,
+                H5S_ALL,
+                H5P_DEFAULT,
+                &this->checkpoint);
+        H5Dclose(dset);
+    }
+    return EXIT_SUCCESS;
+}
+
+template <typename rnumber>
+int NSVE<rnumber>::initialize(void)
+{
+    this->read_iteration();
+    this->read_parameters();
+    if (this->myrank == 0)
+    {
+        // set caching parameters
+        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
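+        // 521 hash table slots and a 128 MiB raw data chunk cache, with
+        // fully preemptable chunks (w0 = 1.0); the first size argument
+        // refers to the metadata cache and is ignored by recent HDF5 versions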
+        herr_t cache_err = H5Pset_cache(fapl, 0, 521, 134217728, 1.0);
+        DEBUG_MSG("when setting stat_file cache I got %d\n", cache_err);
+        this->stat_file = H5Fopen(
+                (this->simname + ".h5").c_str(),
+                H5F_ACC_RDWR,
+                fapl);
+        H5Pclose(fapl);
+    }
+    int data_file_problem;
+    if (this->myrank == 0)
+        data_file_problem = this->grow_file_datasets();
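+    // only rank 0 touches the stat file; broadcast the outcome so that
+    // all ranks abort together if the datasets could not be grown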
+    MPI_Bcast(&data_file_problem, 1, MPI_INT, 0, this->comm);
+    if (data_file_problem > 0)
+    {
+        std::cerr <<
+            data_file_problem <<
+            " problems growing file datasets.\ntrying to exit now." <<
+            std::endl;
+        return EXIT_FAILURE;
+    }
+    this->fs = new vorticity_equation<rnumber, FFTW>(
+            simname.c_str(),
+            nx, ny, nz,
+            dkx, dky, dkz,
+            FFTW_MEASURE);
+    this->tmp_vec_field = new field<rnumber, FFTW, THREE>(
+            nx, ny, nz,
+            this->comm,
+            FFTW_MEASURE);
+
+    this->fs->checkpoints_per_file = checkpoints_per_file;
+    this->fs->nu = nu;
+    this->fs->fmode = fmode;
+    this->fs->famplitude = famplitude;
+    this->fs->fk0 = fk0;
+    this->fs->fk1 = fk1;
+    strncpy(this->fs->forcing_type, forcing_type, 128);
+    this->fs->iteration = this->iteration;
+    this->fs->checkpoint = this->checkpoint;
+
+    this->fs->cvorticity->real_space_representation = false;
+    this->fs->io_checkpoint();
+
+    if (this->myrank == 0 && this->iteration == 0)
+        this->fs->kk->store(stat_file);
+    return EXIT_SUCCESS;
+}
+
+template <typename rnumber>
+int NSVE<rnumber>::main_loop(void)
+{
+    clock_t time0, time1;
+    double time_difference, local_time_difference;
+    time0 = clock();
+    this->stop_code_now = false;
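+    // run up to the next multiple of niter_todo, so that a run resumed
+    // from an arbitrary iteration still stops on a niter_todo boundary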
+    int max_iter = (this->iteration + this->niter_todo -
+                    (this->iteration % this->niter_todo));
+    for (; this->iteration < max_iter;)
+    {
+    #ifdef USE_TIMINGOUTPUT
+        const std::string loopLabel = ("code::main_start::loop-" +
+                                       std::to_string(this->iteration));
+        TIMEZONE(loopLabel.c_str());
+    #endif
+        if (this->iteration % this->niter_stat == 0)
+            this->do_stats();
+        this->fs->step(dt);
+        this->iteration = this->fs->iteration;
+        if (this->fs->iteration % this->niter_out == 0)
+        {
+            this->fs->io_checkpoint(false);
+            this->checkpoint = this->fs->checkpoint;
+            this->write_iteration();
+        }
+        if (this->stop_code_now)
+            break;
+        time1 = clock();
+        local_time_difference = ((
+                (unsigned int)(time1 - time0)) /
+                ((double)CLOCKS_PER_SEC));
+        time_difference = 0.0;
+        MPI_Allreduce(
+                &local_time_difference,
+                &time_difference,
+                1,
+                MPI_DOUBLE,
+                MPI_SUM,
+                MPI_COMM_WORLD);
+        if (this->myrank == 0)
+            std::cout << "iteration " << iteration << " took " << time_difference/nprocs << " seconds" << std::endl;
+        if (this->myrank == 0)
+            std::cerr << "iteration " << iteration << " took " << time_difference/nprocs << " seconds" << std::endl;
+        time0 = time1;
+    }
+    if (this->iteration % this->niter_stat == 0)
+        this->do_stats();
+    time1 = clock();
+    local_time_difference = ((
+            (unsigned int)(time1 - time0)) /
+            ((double)CLOCKS_PER_SEC));
+    time_difference = 0.0;
+    MPI_Allreduce(
+            &local_time_difference,
+            &time_difference,
+            1,
+            MPI_DOUBLE,
+            MPI_SUM,
+            MPI_COMM_WORLD);
+    if (this->myrank == 0)
+        std::cout << "iteration " << iteration << " took " << time_difference/nprocs << " seconds" << std::endl;
+    if (this->myrank == 0)
+        std::cerr << "iteration " << iteration << " took " << time_difference/nprocs << " seconds" << std::endl;
+    if (this->iteration % this->niter_out != 0)
+    {
+        this->fs->io_checkpoint(false);
+        this->checkpoint = this->fs->checkpoint;
+        this->write_iteration();
+    }
+    return EXIT_SUCCESS;
+}
+
+template <typename rnumber>
+int NSVE<rnumber>::finalize(void)
+{
+    if (this->myrank == 0)
+        H5Fclose(this->stat_file);
+    delete this->fs;
+    delete this->tmp_vec_field;
+    return EXIT_SUCCESS;
+}
+
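+/* read_parameters is declared in NSVE.hpp and called from initialize();
+ * its definition is not provided in this file.  The commented-out
+ * HDF5-based version below is kept as a reference for the expected
+ * layout of the "/parameters" group.
+ */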
+//template <typename rnumber>
+//int NSVE<rnumber>::read_parameters(void)
+//{
+//    hid_t parameter_file;
+//    hid_t dset, memtype, space;
+//    char *string_data;
+//    parameter_file = H5Fopen(
+//            (this->simname + std::string(".h5")).c_str(),
+//            H5F_ACC_RDONLY,
+//            H5P_DEFAULT);
+//    dset = H5Dopen(parameter_file, "/parameters/checkpoints_per_file", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &checkpoints_per_file);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/dealias_type", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &dealias_type);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/dkx", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &dkx);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/dky", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &dky);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/dkz", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &dkz);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/dt", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &dt);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/famplitude", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &famplitude);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/fk0", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &fk0);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/fk1", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &fk1);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/fmode", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &fmode);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/forcing_type", H5P_DEFAULT);
+//    space = H5Dget_space(dset);
+//    memtype = H5Dget_type(dset);
+//    string_data = (char*)malloc(256);
+//    H5Dread(dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, &string_data);
+//    sprintf(forcing_type, "%s", string_data);
+//    free(string_data);
+//    H5Sclose(space);
+//    H5Tclose(memtype);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/histogram_bins", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &histogram_bins);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/max_velocity_estimate", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &max_velocity_estimate);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/max_vorticity_estimate", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &max_vorticity_estimate);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/niter_out", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &niter_out);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/niter_part", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &niter_part);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/niter_stat", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &niter_stat);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/niter_todo", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &niter_todo);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/nparticles", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &nparticles);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/nu", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &nu);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/nx", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &nx);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/ny", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &ny);
+//    H5Dclose(dset);
+//    dset = H5Dopen(parameter_file, "/parameters/nz", H5P_DEFAULT);
+//    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &nz);
+//    H5Dclose(dset);
+//    H5Fclose(parameter_file);
+//    return EXIT_SUCCESS;
+//}
+
+
+template <typename rnumber>
+int NSVE<rnumber>::grow_file_datasets()
+{
+    int file_problems = 0;
+
+    hid_t group;
+    group = H5Gopen(this->stat_file, "/statistics", H5P_DEFAULT);
+    int tincrement = this->niter_todo / this->niter_stat;
+    H5Ovisit(
+            group,
+            H5_INDEX_NAME,
+            H5_ITER_NATIVE,
+            grow_dataset_visitor,
+            &tincrement);
+    H5Gclose(group);
+    return file_problems;
+}
+
+template <typename rnumber>
+int NSVE<rnumber>::do_stats()
+{
+    hid_t stat_group;
+    if (this->myrank == 0)
+        stat_group = H5Gopen(
+                this->stat_file,
+                "statistics",
+                H5P_DEFAULT);
+    fs->compute_velocity(fs->cvorticity);
+    *tmp_vec_field = fs->cvelocity->get_cdata();
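+    // max_velocity_estimate bounds the magnitude of the full vector; the
+    // 1/sqrt(3) factor turns it into a per-component estimate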
+    tmp_vec_field->compute_stats(
+            fs->kk,
+            stat_group,
+            "velocity",
+            fs->iteration / niter_stat,
+            max_velocity_estimate/sqrt(3));
+
+    *tmp_vec_field = fs->cvorticity->get_cdata();
+    tmp_vec_field->compute_stats(
+            fs->kk,
+            stat_group,
+            "vorticity",
+            fs->iteration / niter_stat,
+            max_vorticity_estimate/sqrt(3));
+
+    if (this->myrank == 0)
+        H5Gclose(stat_group);
+
+    if (myrank == 0)
+    {
+        std::string fname = (
+                std::string("stop_") +
+                std::string(simname));
+        {
+            struct stat file_buffer;
+            this->stop_code_now = (
+                    stat(fname.c_str(), &file_buffer) == 0);
+        }
+    }
+    MPI_Bcast(
+            &this->stop_code_now,
+            1,
+            MPI_C_BOOL,
+            0,
+            MPI_COMM_WORLD);
+    return EXIT_SUCCESS;
+}
+
+template class NSVE<float>;
+template class NSVE<double>;
+
diff --git a/bfps/cpp/full_code/NSVE.hpp b/bfps/cpp/full_code/NSVE.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..137f6838cf61b5a12b3b28bd5d87329196016e66
--- /dev/null
+++ b/bfps/cpp/full_code/NSVE.hpp
@@ -0,0 +1,126 @@
+/**********************************************************************
+*                                                                     *
+*  Copyright 2017 Max Planck Institute                                *
+*                 for Dynamics and Self-Organization                  *
+*                                                                     *
+*  This file is part of bfps.                                         *
+*                                                                     *
+*  bfps is free software: you can redistribute it and/or modify       *
+*  it under the terms of the GNU General Public License as published  *
+*  by the Free Software Foundation, either version 3 of the License,  *
+*  or (at your option) any later version.                             *
+*                                                                     *
+*  bfps is distributed in the hope that it will be useful,            *
+*  but WITHOUT ANY WARRANTY; without even the implied warranty of     *
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the      *
+*  GNU General Public License for more details.                       *
+*                                                                     *
+*  You should have received a copy of the GNU General Public License  *
+*  along with bfps.  If not, see <http://www.gnu.org/licenses/>       *
+*                                                                     *
+* Contact: Cristian.Lalescu@ds.mpg.de                                 *
+*                                                                     *
+**********************************************************************/
+
+
+
+#ifndef NSVE_HPP
+#define NSVE_HPP
+
+
+
+#include <cstdlib>
+#include "base.hpp"
+#include "vorticity_equation.hpp"
+#include "full_code/direct_numerical_simulation.hpp"
+
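+/* Helpers used by NSVE::grow_file_datasets: H5Ovisit walks the
+ * "/statistics" group and every dataset it finds is extended by
+ * tincrement entries along its first (time) dimension.
+ */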
+inline int grow_single_dataset(hid_t dset, int tincrement)
+{
+    int ndims;
+    hid_t space;
+    space = H5Dget_space(dset);
+    ndims = H5Sget_simple_extent_ndims(space);
+    hsize_t *dims = new hsize_t[ndims];
+    H5Sget_simple_extent_dims(space, dims, NULL);
+    dims[0] += tincrement;
+    H5Dset_extent(dset, dims);
+    H5Sclose(space);
+    delete[] dims;
+    return EXIT_SUCCESS;
+}
+
+inline herr_t grow_dataset_visitor(
+    hid_t o_id,
+    const char *name,
+    const H5O_info_t *info,
+    void *op_data)
+{
+    if (info->type == H5O_TYPE_DATASET)
+    {
+        hid_t dset = H5Dopen(o_id, name, H5P_DEFAULT);
+        grow_single_dataset(dset, *((int*)(op_data)));
+        H5Dclose(dset);
+    }
+    return EXIT_SUCCESS;
+}
+
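+/** Navier-Stokes direct numerical simulation, based on the vorticity
+ *  formulation implemented in vorticity_equation.  Statistics are written
+ *  to the "simname.h5" stat file, fields to the checkpoint files handled
+ *  by vorticity_equation::io_checkpoint.
+ */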
+template <typename rnumber>
+class NSVE: public direct_numerical_simulation
+{
+    public:
+
+        /* parameters that are read in read_parameters */
+        int checkpoints_per_file;
+        int dealias_type;
+        double dkx;
+        double dky;
+        double dkz;
+        double dt;
+        double famplitude;
+        double fk0;
+        double fk1;
+        int fmode;
+        char forcing_type[512];
+        int histogram_bins;
+        double max_velocity_estimate;
+        double max_vorticity_estimate;
+        int niter_out;
+        int niter_part;
+        int niter_stat;
+        int niter_todo;
+        int nparticles;
+        double nu;
+        int nx;
+        int ny;
+        int nz;
+
+        /* other stuff */
+        int iteration, checkpoint;
+        hid_t stat_file;
+        bool stop_code_now;
+        vorticity_equation<rnumber, FFTW> *fs;
+        field<rnumber, FFTW, THREE> *tmp_vec_field;
+        field<rnumber, FFTW, ONE> *tmp_scal_field;
+
+
+        NSVE(
+                const MPI_Comm COMMUNICATOR,
+                const std::string &simulation_name):
+            direct_numerical_simulation(
+                    COMMUNICATOR,
+                    simulation_name){}
+        ~NSVE(){}
+
+        int initialize(void);
+        int main_loop(void);
+        int finalize(void);
+
+        int read_iteration(void);
+        int write_iteration(void);
+        int read_parameters(void);
+        int grow_file_datasets(void);
+        int do_stats(void);
+};
+
+#endif//NSVE_HPP
+
diff --git a/bfps/cpp/full_code/direct_numerical_simulation.cpp b/bfps/cpp/full_code/direct_numerical_simulation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..edb6b88b9739c93c8ffc1669bb433f314bf36167
--- /dev/null
+++ b/bfps/cpp/full_code/direct_numerical_simulation.cpp
@@ -0,0 +1,12 @@
+#include "direct_numerical_simulation.hpp"
+
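+// store the communicator and cache its rank and size; everything else is
+// deferred to the concrete simulation (initialize / main_loop / finalize)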
+direct_numerical_simulation::direct_numerical_simulation(
+        const MPI_Comm COMMUNICATOR,
+        const std::string &simulation_name):
+    comm(COMMUNICATOR),
+    simname(simulation_name)
+{
+    MPI_Comm_rank(this->comm, &this->myrank);
+    MPI_Comm_size(this->comm, &this->nprocs);
+}
+
diff --git a/bfps/cpp/full_code/direct_numerical_simulation.hpp b/bfps/cpp/full_code/direct_numerical_simulation.hpp
index a7833a14c636ea2c5d236f148967cb70ad0d2c0d..42ec84d58f06032a31f0012072a3c5a9000caef7 100644
--- a/bfps/cpp/full_code/direct_numerical_simulation.hpp
+++ b/bfps/cpp/full_code/direct_numerical_simulation.hpp
@@ -1,6 +1,6 @@
 /**********************************************************************
 *                                                                     *
-*  Copyright 2015 Max Planck Institute                                *
+*  Copyright 2017 Max Planck Institute                                *
 *                 for Dynamics and Self-Organization                  *
 *                                                                     *
 *  This file is part of bfps.                                         *
@@ -29,16 +29,23 @@
 
 
 #include "base.hpp"
-#include "field.hpp"
 
 class direct_numerical_simulation
 {
     public:
-        bool floating_point_exceptions;
         int myrank, nprocs;
         MPI_Comm comm;
 
-        std::
+        std::string simname;
+
+        direct_numerical_simulation(
+                const MPI_Comm COMMUNICATOR,
+                const std::string &simulation_name);
+        virtual ~direct_numerical_simulation(){}
+
+        virtual int initialize(void) = 0;
+        virtual int main_loop(void) = 0;
+        virtual int finalize(void) = 0;
 };
 
 #endif//DIRECT_NUMERICAL_SIMULATION_HPP
diff --git a/bfps/cpp/full_code/main_code.hpp b/bfps/cpp/full_code/main_code.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..14029f4a85e7be47f097e4a3136a49d7cf5e8f06
--- /dev/null
+++ b/bfps/cpp/full_code/main_code.hpp
@@ -0,0 +1,161 @@
+/**********************************************************************
+*                                                                     *
+*  Copyright 2017 Max Planck Institute                                *
+*                 for Dynamics and Self-Organization                  *
+*                                                                     *
+*  This file is part of bfps.                                         *
+*                                                                     *
+*  bfps is free software: you can redistribute it and/or modify       *
+*  it under the terms of the GNU General Public License as published  *
+*  by the Free Software Foundation, either version 3 of the License,  *
+*  or (at your option) any later version.                             *
+*                                                                     *
+*  bfps is distributed in the hope that it will be useful,            *
+*  but WITHOUT ANY WARRANTY; without even the implied warranty of     *
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the      *
+*  GNU General Public License for more details.                       *
+*                                                                     *
+*  You should have received a copy of the GNU General Public License  *
+*  along with bfps.  If not, see <http://www.gnu.org/licenses/>       *
+*                                                                     *
+* Contact: Cristian.Lalescu@ds.mpg.de                                 *
+*                                                                     *
+**********************************************************************/
+
+
+
+#ifndef MAIN_CODE_HPP
+#define MAIN_CODE_HPP
+
+
+#include "base.hpp"
+#include "field.hpp"
+#include "scope_timer.hpp"
+
+template <class DNS>
+int main_code(
+        int argc,
+        char *argv[],
+        const std::string &simname,
+        const bool floating_point_exceptions)
+{
+    /* floating point exception switch */
+    if (floating_point_exceptions)
+        feenableexcept(FE_INVALID | FE_OVERFLOW);
+    else
+        // using std::cerr because DEBUG_MSG requires myrank to be defined
+        std::cerr << ("FPE have been turned OFF" << std::endl;
+
+
+
+    /* initialize MPI environment */
+    int myrank, nprocs;
+#ifdef NO_FFTWOMP
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+    fftw_mpi_init();
+    fftwf_mpi_init();
+    DEBUG_MSG("There are %d processes\n", nprocs);
+#else
+    int mpiprovided;
+    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &mpiprovided);
+    assert(mpiprovided >= MPI_THREAD_FUNNELED);
+    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+    const int nThreads = omp_get_max_threads();
+    DEBUG_MSG("Number of threads for the FFTW = %d\n",
+              nThreads);
+    if (nThreads > 1){
+        fftw_init_threads();
+        fftwf_init_threads();
+    }
+    fftw_mpi_init();
+    fftwf_mpi_init();
+    DEBUG_MSG("There are %d processes and %d threads\n",
+              nprocs,
+              nThreads);
+    if (nThreads > 1){
+        fftw_plan_with_nthreads(nThreads);
+        fftwf_plan_with_nthreads(nThreads);
+    }
+#endif
+
+
+
+    /* import fftw wisdom */
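+    // only rank 0 reads the wisdom file; the wisdom is then broadcast to
+    // all ranks.  Only single-precision (fftwf) wisdom is handled here.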
+    if (myrank == 0)
+    {
+        char fname[256];
+        sprintf(fname, "%s_fftw_wisdom.txt", simname.c_str());
+        fftwf_import_wisdom_from_filename(fname);
+    }
+    fftwf_mpi_broadcast_wisdom(MPI_COMM_WORLD);
+
+
+
+    /* actually run DNS */
+    /*
+     * MPI environment:
+     * I could in principle pass myrank and nprocs instead of the global
+     * communicator, but it is possible that we'd like to do something more
+     * complex in the future (since I've done it in the past), and it's not
+     * expensive to keep several copies of myrank and nprocs.
+     *
+     * usage of assert:
+     * we could use assert here, but even if initialization or the main loop
+     * fails we still want to clean up the fftw and MPI environments before
+     * exiting, so we check return values instead.
+     */
+    DNS *dns = new DNS(
+            MPI_COMM_WORLD,
+            simname);
+    int return_value;
+    return_value = dns->initialize();
+    if (return_value == EXIT_SUCCESS)
+        return_value = dns->main_loop();
+    else
+        DEBUG_MSG("problem calling dns->initialize(), return value is %d",
+                  return_value);
+    if (return_value == EXIT_SUCCESS)
+        return_value = dns->finalize();
+    else
+        DEBUG_MSG("problem calling dns->main_loop(), return value is %d",
+                  return_value);
+    if (return_value != EXIT_SUCCESS)
+        DEBUG_MSG("problem calling dns->finalize(), return value is %d",
+                  return_value);
+    delete dns;
+
+
+
+    /* export fftw wisdom */
+    fftwf_mpi_gather_wisdom(MPI_COMM_WORLD);
+    MPI_Barrier(MPI_COMM_WORLD);
+    if (myrank == 0)
+    {
+        char fname[256];
+        sprintf(fname, "%s_fftw_wisdom.txt", simname.c_str());
+        fftwf_export_wisdom_to_filename(fname);
+    }
+
+
+
+    /* clean up */
+    fftwf_mpi_cleanup();
+    fftw_mpi_cleanup();
+#ifndef NO_FFTWOMP
+    if (nThreads > 1){
+        fftw_cleanup_threads();
+        fftwf_cleanup_threads();
+    }
+#endif
+#ifdef USE_TIMINGOUTPUT
+    global_timer_manager.show(MPI_COMM_WORLD);
+    global_timer_manager.showHtml(MPI_COMM_WORLD);
+#endif
+
+    MPI_Finalize();
+    return return_value;
+}
+
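+/* A minimal caller might look like the sketch below (illustrative only,
+ * not part of this diff; the concrete DNS type and the simulation name
+ * are just examples):
+ *
+ *     #include "full_code/main_code.hpp"
+ *     #include "full_code/NSVE.hpp"
+ *
+ *     int main(int argc, char *argv[])
+ *     {
+ *         return main_code<NSVE<float>>(argc, argv, "test", true);
+ *     }
+ */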
+
+#endif//MAIN_CODE_HPP
+
diff --git a/setup.py b/setup.py
index e1d85b38a95a4a47186e74c44a1e4aeb52098da2..69753c37cab7ae3d84c2549a394e910a60841168 100644
--- a/setup.py
+++ b/setup.py
@@ -88,7 +88,9 @@ print('This is bfps version ' + VERSION)
 
 
 ### lists of files and MANIFEST.in
-src_file_list = ['vorticity_equation',
+src_file_list = ['full_code/direct_numerical_simulation',
+                 'full_code/NSVE',
+                 'vorticity_equation',
                  'field',
                  'kspace',
                  'field_layout',
@@ -130,6 +132,8 @@ particle_headers = [
         'cpp/particles/particles_system.hpp',
         'cpp/particles/particles_utils.hpp']
 
+full_code_headers = ['cpp/full_code/main_code.hpp']
+
 header_list = (['cpp/base.hpp'] +
                ['cpp/fftw_interface.hpp'] +
                ['cpp/bfps_timer.hpp'] +
@@ -137,7 +141,8 @@ header_list = (['cpp/base.hpp'] +
                ['cpp/shared_array.hpp'] +
                ['cpp/' + fname + '.hpp'
                 for fname in src_file_list] +
-               particle_headers)
+               particle_headers +
+               full_code_headers)
 
 with open('MANIFEST.in', 'w') as manifest_in_file:
     for fname in (['bfps/cpp/' + ff + '.cpp' for ff in src_file_list] +