diff --git a/src/classification/ConvexHull1D.cpp b/src/classification/ConvexHull1D.cpp
index ca3d9fed19aecf314040c07c99d9fa9a3998d13c..6c01621e6bd550e051788c62f1114430bea39ba7 100644
--- a/src/classification/ConvexHull1D.cpp
+++ b/src/classification/ConvexHull1D.cpp
@@ -11,7 +11,7 @@ ConvexHull1D::ConvexHull1D() :
     _n_class(0)
 {}
 
-ConvexHull1D::ConvexHull1D(const std::vector<int>& sizes, double* prop) :
+ConvexHull1D::ConvexHull1D(const std::vector<int>& sizes, const double* prop) :
     _sorted_value(std::accumulate(sizes.begin(), sizes.end(), 0), 0.0),
     _cls_max(),
     _cls_min(),
@@ -24,7 +24,7 @@ ConvexHull1D::ConvexHull1D(const std::vector<int>& sizes, double* prop) :
     initialize_prop(sizes, prop);
 }
 
-void ConvexHull1D::initialize_prop(const std::vector<int>& sizes, double* prop)
+void ConvexHull1D::initialize_prop(const std::vector<int>& sizes, const double* prop)
 {
     _n_task = sizes.size();
 
@@ -42,7 +42,7 @@ void ConvexHull1D::initialize_prop(const std::vector<int>& sizes, double* prop)
     for(int tt = 0; tt < sizes.size(); ++tt)
     {
         int start_original = start;
-        util_funcs::argsort(_sorted_prop_inds.data() + start, _sorted_prop_inds.data() + start + sizes[tt], prop + start);
+        util_funcs::argsort<double>(_sorted_prop_inds.data() + start, _sorted_prop_inds.data() + start + sizes[tt], prop + start);
         for(int pp = start + 1; pp < start_original + sizes[tt]; ++pp)
         {
             if(prop[_sorted_prop_inds[pp]] != prop[_sorted_prop_inds[pp - 1]])
@@ -146,5 +146,5 @@ double ConvexHull1D::overlap_1d(double* value, double width)
             }
         }
     }
-    return util_funcs::mean(_task_scores);
+    return util_funcs::mean<double>(_task_scores);
 }
diff --git a/src/classification/ConvexHull1D.hpp b/src/classification/ConvexHull1D.hpp
index ffc914a43f9a6f2bb75fb958efbf2d2291bd2d91..b4ff3921449771bba8ea46f42c1bc005656d9988 100644
--- a/src/classification/ConvexHull1D.hpp
+++ b/src/classification/ConvexHull1D.hpp
@@ -36,7 +36,7 @@ public:
      * @param sizes The size of the tasks
      * @param prop The pointer to the property vector
      */
-    ConvexHull1D(const std::vector<int>& sizes, double* prop);
+    ConvexHull1D(const std::vector<int>& sizes, const double* prop);
 
     /**
      * @brief Initialize the projection objects
@@ -49,14 +49,7 @@ public:
      * @param sizes The size of the tasks
      * @param prop The pointer to the property vector
      */
-    void initialize_prop(const std::vector<int>& sizes, double* prop);
-
-    /**
-     * @brief Change the property vectors
-     *
-     * @param prop The new property
-     */
-    void reset_prop(double* prop);
+    void initialize_prop(const std::vector<int>& sizes, const double* prop);
 
     /**
      * @brief Calculate the projection scores of a set of features to a vector via Pearson correlation
diff --git a/src/descriptor_identifier/Model/Model.hpp b/src/descriptor_identifier/Model/Model.hpp
index 0035a3ad9f44dc5fa6e497b198d97b6a1a999d69..7a409abd6a5aa26694d3859fc5118f1b10c2c735 100644
--- a/src/descriptor_identifier/Model/Model.hpp
+++ b/src/descriptor_identifier/Model/Model.hpp
@@ -202,7 +202,7 @@ public:
      * @brief The unit of the property
      * @return The unit of the property
      */
-    inline Unit prop_unit(){return _prop_unit;}
+    inline Unit prop_unit() const {return _prop_unit;}
 
     // DocString: model_prop_label
     /**
diff --git a/src/descriptor_identifier/Model/ModelClassifier.cpp b/src/descriptor_identifier/Model/ModelClassifier.cpp
index 12ea00d1be626dbc14e5ec6cc265ffe98ab72103..35a8fd4e2ab7e04ac7d21243c684d4d592a7387f 100644
--- a/src/descriptor_identifier/Model/ModelClassifier.cpp
+++ b/src/descriptor_identifier/Model/ModelClassifier.cpp
@@ -422,11 +422,11 @@ void ModelClassifier::set_train_test_error()
         // Set up sorted data for LP claculations
         std::vector<int> inds_train(_task_sizes_train[tt], 0);
         std::iota(inds_train.begin(), inds_train.end(), task_start_train);
-        util_funcs::argsort(inds_train.data(), inds_train.data() + inds_train.size(), &_prop_train[task_start_train]);
+        util_funcs::argsort<double>(inds_train.data(), inds_train.data() + inds_train.size(), &_prop_train[task_start_train]);
 
         std::vector<int> inds_test(_task_sizes_test[tt], 0);
         std::iota(inds_test.begin(), inds_test.end(), task_start_test);
-        util_funcs::argsort(inds_test.data(), inds_test.data() + inds_test.size(), &_prop_test[task_start_test]);
+        util_funcs::argsort<double>(inds_test.data(), inds_test.data() + inds_test.size(), &_prop_test[task_start_test]);
 
         std::vector<std::vector<int>> inds_class_train(1, {inds_train[0]});
 
diff --git a/src/descriptor_identifier/Model/ModelRegressor.cpp b/src/descriptor_identifier/Model/ModelRegressor.cpp
index ecfc9aa64d28c62593e6f072fb2a85fcd3d8cf84..4d68e0f1a59beb7583a00d70a32c6e36baaa99d6 100644
--- a/src/descriptor_identifier/Model/ModelRegressor.cpp
+++ b/src/descriptor_identifier/Model/ModelRegressor.cpp
@@ -562,12 +562,12 @@ double ModelRegressor::mape()
 {
     std::vector<double> percent_error(_train_error.size(), 0.0);
     std::transform(_train_error.begin(), _train_error.end(), _prop_train.begin(), percent_error.begin(), [](double e, double p){return std::abs(e / p);});
-    return util_funcs::mean(percent_error);
+    return util_funcs::mean<double>(percent_error);
 }
 
 double ModelRegressor::test_mape()
 {
     std::vector<double> percent_error(_test_error.size(), 0.0);
     std::transform(_test_error.begin(), _test_error.end(), _prop_test.begin(), percent_error.begin(), [](double e, double p){return std::abs(e / p);});
-    return util_funcs::mean(percent_error);
+    return util_funcs::mean<double>(percent_error);
 }
diff --git a/src/descriptor_identifier/SISSO_DI/SISSOClassifier.cpp b/src/descriptor_identifier/SISSO_DI/SISSOClassifier.cpp
index 712b7c44748753a46ab603817c0c9be18383f7dd..75678fef4988e6c383ddd790a5b3fd9f2a7addff 100644
--- a/src/descriptor_identifier/SISSO_DI/SISSOClassifier.cpp
+++ b/src/descriptor_identifier/SISSO_DI/SISSOClassifier.cpp
@@ -36,7 +36,7 @@ void SISSOClassifier::check_prop_test(std::vector<double> prop, std::vector<doub
 
 void SISSOClassifier::setup_prop(std::vector<double> prop, std::vector<double> prop_test)
 {
-    std::vector<int> inds = util_funcs::argsort(prop);
+    std::vector<int> inds = util_funcs::argsort<double>(prop);
     std::map<double, double> class_map;
     double class_num = 0;
     prop[inds[0]] = 0;
@@ -82,7 +82,7 @@ std::vector<LPWrapper> SISSOClassifier::setup_lp(int n_dim)
         std::vector<int> inds(_task_sizes_train[tt]);
         std::iota(inds.begin(), inds.end(), task_start);
 
-        util_funcs::argsort(inds.data(), inds.data() + inds.size(), &_prop[task_start]);
+        util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), &_prop[task_start]);
         _sample_inds_to_sorted_dmat_inds[inds[0]] = task_start;
 
         int cls_start = 0;
@@ -317,7 +317,7 @@ void SISSOClassifier::l0_norm(std::vector<double>& prop, int n_dim)
         [&max_dist](double margin, double score){return score + (1.0 - margin / max_dist);}
     );
 
-    inds = util_funcs::argsort(scores);
+    inds = util_funcs::argsort<double>(scores);
 
     std::vector<model_node_ptr> min_nodes(n_dim);
     std::vector<ModelClassifier> models;
diff --git a/src/descriptor_identifier/SISSO_DI/SISSOLogRegressor.cpp b/src/descriptor_identifier/SISSO_DI/SISSOLogRegressor.cpp
index 55c58e8cc512be4503552a921e9f728a8cb1afc3..855ab6c71311c805d6e9d5354e2ea7654c4cca46 100644
--- a/src/descriptor_identifier/SISSO_DI/SISSOLogRegressor.cpp
+++ b/src/descriptor_identifier/SISSO_DI/SISSOLogRegressor.cpp
@@ -166,7 +166,7 @@ void SISSOLogRegressor::l0_norm(std::vector<double>& prop, int n_dim)
     mpi::all_gather(*_mpi_comm, min_errors.data(), n_get_models, all_min_error);
     mpi::all_gather(*_mpi_comm, min_inds.data(), n_get_models * n_dim, all_min_inds);
 
-    inds = util_funcs::argsort(all_min_error);
+    inds = util_funcs::argsort<double>(all_min_error);
     for(int rr = 0; rr < n_get_models; ++rr)
     {
         std::vector<int> indexes(n_dim);
diff --git a/src/descriptor_identifier/SISSO_DI/SISSORegressor.cpp b/src/descriptor_identifier/SISSO_DI/SISSORegressor.cpp
index f5e4ce67f24464347ece10be88be2d5e0ebc717e..35ed8000699c4f2789a1e7d91f492b43c82e6645 100644
--- a/src/descriptor_identifier/SISSO_DI/SISSORegressor.cpp
+++ b/src/descriptor_identifier/SISSO_DI/SISSORegressor.cpp
@@ -224,7 +224,7 @@ void SISSORegressor::l0_norm(std::vector<double>& prop, int n_dim)
     mpi::all_gather(*_mpi_comm, min_errors.data(), n_get_models, all_min_error);
     mpi::all_gather(*_mpi_comm, min_inds.data(), n_get_models * n_dim, all_min_inds);
 
-    inds = util_funcs::argsort(all_min_error);
+    inds = util_funcs::argsort<double>(all_min_error);
     for(int rr = 0; rr < n_get_models; ++rr)
     {
         std::vector<int> indexes(n_dim);
diff --git a/src/feature_creation/feature_space/FeatureSpace.cpp b/src/feature_creation/feature_space/FeatureSpace.cpp
index 4b2678b64c256fbb4470f5be7150128ad33c2e02..7cac0863a55b362c1b39d724d8120d1ab3a84e7a 100644
--- a/src/feature_creation/feature_space/FeatureSpace.cpp
+++ b/src/feature_creation/feature_space/FeatureSpace.cpp
@@ -390,7 +390,7 @@ void FeatureSpace::generate_feature_space()
                 _mpi_comm->barrier();
                 project_funcs::project_r(_prop.data(), _scores.data(), _phi, _task_sizes, 1);
                 _scores.erase(_scores.begin(), _scores.begin() + _start_gen[_start_gen.size() - 1]);
-                inds = util_funcs::argsort(_scores);
+                inds = util_funcs::argsort<double>(_scores);
 
                 std::vector<int> del_inds;
 
@@ -457,7 +457,7 @@ void FeatureSpace::generate_feature_space()
                     }
                 }
 
-                inds = util_funcs::argsort(del_inds);
+                inds = util_funcs::argsort<int>(del_inds);
                 for(int ii = 0; ii < inds.size(); ++ii)
                 {
                     _phi.erase(_phi.begin() - del_inds[inds[ii]]);
@@ -689,7 +689,7 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
             std::vector<double> scores(generated_phi.size());
             _project_no_omp(prop, scores.data(), generated_phi, _task_sizes, size / _n_samp);
 
-            std::vector<int> inds = util_funcs::argsort(scores);
+            std::vector<int> inds = util_funcs::argsort<double>(scores);
 
             int ii = 0;
             while((ii < inds.size()) && (scores[inds[ii]] < -1.0))
@@ -809,7 +809,7 @@ void FeatureSpace::sis(std::vector<double>& prop)
     }
 
     // Sort the scores to get inds
-    std::vector<int> inds = util_funcs::argsort(_scores);
+    std::vector<int> inds = util_funcs::argsort<double>(_scores);
     int ii = 0;
     int cur_feat_local = 0;
     double cur_score = 0.0;
@@ -949,7 +949,7 @@ void FeatureSpace::sis(std::vector<double>& prop)
 
         cur_feat_local = 0;
         // Move selected features into _phi_selected and add the features to the output files
-        inds = util_funcs::argsort(scores_sel);
+        inds = util_funcs::argsort<double>(scores_sel);
         for(auto& ind : inds)
         {
             node_value_arrs::clear_temp_reg();
diff --git a/src/feature_creation/feature_space/FeatureSpace.hpp b/src/feature_creation/feature_space/FeatureSpace.hpp
index e41928f9e3a9702069ca9fed1cee533677f937a4..557fb2cfbc8dd7d544a55475fbd10062a6ac12eb 100644
--- a/src/feature_creation/feature_space/FeatureSpace.hpp
+++ b/src/feature_creation/feature_space/FeatureSpace.hpp
@@ -65,10 +65,10 @@ class FeatureSpace
     const std::string _feature_space_file; //!< File to store information about the selected features
     const std::string _feature_space_summary_file; //!< File to store information about the selected features
 
-    std::function<void(double*, double*, std::vector<node_ptr>&, const std::vector<int>&, int)> _project; //!< Function used to calculate the scores for SIS
-    std::function<void(double*, double*, std::vector<node_ptr>&, const std::vector<int>&, int)> _project_no_omp; //!< Function used to calculate the scores for SIS without changing omp environment
-    std::function<bool(double*, int, double, std::vector<double>&, double, int, int)> _is_valid; //!< Function used to calculate the scores for SIS
-    std::function<bool(double*, int, double, std::vector<node_ptr>&, std::vector<double>&, double)> _is_valid_feat_list; //!< Function used to calculate the scores for SIS without changing omp environment
+    std::function<void(const double*, double*, const std::vector<node_ptr>&, const std::vector<int>&, const int)> _project; //!< Function used to calculate the scores for SIS
+    std::function<void(const double*, double*, const std::vector<node_ptr>&, const std::vector<int>&, const int)> _project_no_omp; //!< Function used to calculate the scores for SIS without changing omp environment
+    std::function<bool(const double*, const int, const double, const std::vector<double>&, const double, const int, const int)> _is_valid; //!< Function used to check if a candidate feature is valid against the stored selected scores (validity predicate for SIS)
+    std::function<bool(const double*, const int, const double, const std::vector<node_ptr>&, const std::vector<double>&, const double)> _is_valid_feat_list; //!< Function used to check if a candidate feature is valid against an explicit list of features and their scores
 
     std::shared_ptr<MPI_Interface> _mpi_comm; //!< MPI communicator
 
diff --git a/src/feature_creation/node/FeatureNode.cpp b/src/feature_creation/node/FeatureNode.cpp
index b888799116a4f2a20f3db21786826323f303a1cd..8a516bef36cc474fac758ba98c3f8651d251b822 100644
--- a/src/feature_creation/node/FeatureNode.cpp
+++ b/src/feature_creation/node/FeatureNode.cpp
@@ -3,7 +3,7 @@
 FeatureNode::FeatureNode()
 {}
 
-FeatureNode::FeatureNode(unsigned long int feat_ind, std::string expr, std::vector<double> value, std::vector<double> test_value, Unit unit, bool set_val) :
+FeatureNode::FeatureNode(const unsigned long int feat_ind, const std::string expr, const std::vector<double> value, const std::vector<double> test_value, const Unit unit, const bool set_val) :
     Node(feat_ind, value.size(), test_value.size()),
     _value(value),
     _test_value(test_value),
@@ -21,7 +21,7 @@ FeatureNode::FeatureNode(unsigned long int feat_ind, std::string expr, std::vect
 FeatureNode::~FeatureNode()
 {}
 
-bool FeatureNode::is_const()
+bool FeatureNode::is_const() const
 {
     bool is_c = false;
     int pos = 0;
@@ -29,14 +29,14 @@ bool FeatureNode::is_const()
     double* val_ptr = value_ptr();
     for(auto& sz : node_value_arrs::TASK_SZ_TRAIN)
     {
-        double mean = util_funcs::mean(val_ptr + pos, sz);
+        double mean = util_funcs::mean<double>(val_ptr + pos, sz);
         is_c = is_c || std::all_of(val_ptr + pos, val_ptr + pos + sz, [&mean](double d){return std::abs(d - mean) < 1e-12;});
         pos += sz;
     }
     return is_c;
 }
 
-void FeatureNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void FeatureNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     if(add_sub_leaves.count(_expr) > 0)
     {
@@ -50,7 +50,7 @@ void FeatureNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leav
     ++expected_abs_tot;
 }
 
-void FeatureNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void FeatureNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     if(div_mult_leaves.count(_expr) > 0)
     {
@@ -64,14 +64,14 @@ void FeatureNode::update_div_mult_leaves(std::map<std::string, double>& div_mult
     expected_abs_tot += std::abs(fact);
 }
 
-std::map<int, int> FeatureNode::primary_feature_decomp()
+std::map<int, int> FeatureNode::primary_feature_decomp() const
 {
     std::map<int, int> pf_decomp;
     pf_decomp[_arr_ind] = 1;
     return pf_decomp;
 }
 
-void FeatureNode::update_primary_feature_decomp(std::map<int, int>& pf_decomp)
+void FeatureNode::update_primary_feature_decomp(std::map<int, int>& pf_decomp) const
 {
     if(pf_decomp.count(_arr_ind) > 0)
     {
diff --git a/src/feature_creation/node/FeatureNode.hpp b/src/feature_creation/node/FeatureNode.hpp
index 752642b485833f94f93b7ebcf6da488bcb74d029..477e36cddaf94034544781917df4ddb06784a81a 100644
--- a/src/feature_creation/node/FeatureNode.hpp
+++ b/src/feature_creation/node/FeatureNode.hpp
@@ -71,7 +71,14 @@ public:
      * @param unit Unit of the feature
      * @param set_val if true set the value inside the VALUES_ARR object
      */
-    FeatureNode(unsigned long int feat_ind, std::string expr, std::vector<double> value, std::vector<double> test_value, Unit unit, bool set_val = true);
+    FeatureNode(
+        const unsigned long int feat_ind,
+        const std::string expr,
+        const std::vector<double> value,
+        const std::vector<double> test_value,
+        const Unit unit,
+        const bool set_val=true
+    );
     #ifdef PY_BINDINGS
     /**
      * @brief Constructs a feature node using numpy arrays (cpp definition in <python/feature_creation/FeatureNode.cpp)
@@ -132,7 +139,7 @@ public:
      * @brief Get the list of feature expressions
      * @return vector storing the expressions for all primary features that show up in feature in the order they appear in the postfix notation
      */
-    virtual inline std::vector<std::string> get_x_in_expr_list(){return std::vector<std::string>(1, _expr);}
+    virtual inline std::vector<std::string> get_x_in_expr_list() const {return std::vector<std::string>(1, _expr);}
 
     /**
      * @brief return the number of leaves in the feature
@@ -140,13 +147,7 @@ public:
      * @param cur_n_leaves The current number of primary features that are inside the feature
      * @return total number of leaves of the binary expression tree
      */
-    virtual inline int n_leaves(int cur_n_leaves = 0){return cur_n_leaves + 1;}
-
-    // DocString: feat_node_expr_1
-    /**
-     * @brief Get the string expression used to represent the primary feature
-     */
-    inline std::string expr(){return _expr;}
+    virtual inline int n_leaves(int cur_n_leaves = 0) const {return cur_n_leaves + 1;}
 
     // DocString: feat_node_expr_const
     /**
@@ -158,23 +159,23 @@ public:
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return str_utils::latexify(_expr);}
+    inline std::string get_latex_expr() const {return str_utils::latexify(_expr);}
 
     // DocString: feat_node_unit
     /**
      * @brief The unit of the primary feature
      */
-    inline Unit unit(){return _unit;}
+    inline Unit unit() const {return _unit;}
 
     /**
      * @brief Get the training data of the feature
      */
-    inline std::vector<double> value(){return _value;}
+    inline std::vector<double> value() const {return _value;}
 
     /**
      * @brief Get the test data for the feature
      */
-    inline std::vector<double> test_value(){return _test_value;}
+    inline std::vector<double> test_value() const {return _test_value;}
 
     // DocString: feat_node_set_value
     /**
@@ -183,7 +184,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false)
+    inline void set_value(int offset=-1, const bool for_comp=false) const
     {
         std::copy_n(_value.data(), _n_samp, value_ptr());
     }
@@ -195,16 +196,19 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false)
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const
     {
-        if(!_selected) std::copy_n(_test_value.data(), _n_test_samp, test_value_ptr());
+        if(!_selected)
+        {
+            std::copy_n(_test_value.data(), _n_test_samp, test_value_ptr());
+        }
     }
 
     // DocString: feat_node_is_nan
     /**
      * @brief Check if the feature contains NaN
      */
-    inline bool is_nan()
+    inline bool is_nan() const
     {
         return std::any_of(value_ptr(), value_ptr() + _n_samp, [](double d){return !std::isfinite(d);});
     }
@@ -213,12 +217,12 @@ public:
     /**
      * @brief Check if feature is constant
      */
-    bool is_const();
+    bool is_const() const;
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::FEAT;}
+    inline NODE_TYPE type() const {return NODE_TYPE::FEAT;}
 
     /**
      * @brief The pointer to where the feature's training data is stored
@@ -226,7 +230,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false)
+    virtual inline double* value_ptr(int offset=-1, const bool for_comp=false) const
     {
         return _selected ? node_value_arrs::get_d_matrix_ptr(_d_mat_ind) : node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, 0, offset, for_comp);
     }
@@ -237,7 +241,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false)
+    virtual inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const
     {
         return node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, 0, offset, for_comp);
     }
@@ -248,20 +252,20 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return cur_rung;}
+    inline int rung(int cur_rung = 0) const {return cur_rung;}
 
     /**
      * @brief Get the primary feature decomposition of a feature
      * @return A map representing the primary feature comprising a feature
      */
-    std::map<int, int> primary_feature_decomp();
+    std::map<int, int> primary_feature_decomp() const;
 
     /**
      * @brief Update the primary feature decomposition of a feature
      *
      * @param pf_decomp The primary feature decomposition of the feature calling this function.
      */
-    void update_primary_feature_decomp(std::map<int, int>& pf_decomp);
+    void update_primary_feature_decomp(std::map<int, int>& pf_decomp) const;
 
     /**
      * @brief Converts a feature into a postfix expression (reverse polish notation)
@@ -272,7 +276,7 @@ public:
      * @param add_params If true add the parameters
      * @return The current postfix expression of the feature
      */
-    inline void update_postfix(std::string& cur_expr, bool add_params=true)
+    inline void update_postfix(std::string& cur_expr, const bool add_params=true) const
     {
         cur_expr = get_postfix_term() + "|" + cur_expr;
     }
@@ -281,14 +285,14 @@ public:
      * @brief Get the three character representation of the operator
      * @return the three character representation of the operator
      */
-    inline std::string get_postfix_term(){return std::to_string(_feat_ind);}
+    inline std::string get_postfix_term() const {return std::to_string(_feat_ind);}
 
     //DocString: feat_node_nfeats
     /**
      * @brief Number of features used for an operator node
      * @return the number of features for an operator node
      */
-    inline int n_feats(){return 0;}
+    inline int n_feats() const {return 0;}
 
     //DocString: feat_node_feat
     /**
@@ -297,7 +301,7 @@ public:
      * @param ind the index of the node to access
      * @return the ind feature in feature_list
      */
-    inline node_ptr feat(int ind)
+    inline node_ptr feat(const int ind) const
     {
         if(ind > 0)
         {
@@ -313,7 +317,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -323,20 +327,20 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return std::vector<double>();};
+    inline std::vector<double> parameters() const {return std::vector<double>();};
 
     /**
      * @brief Set the non-linear parameters
      * @param params The new parameters of the node
      * @param check_sz If true check the size of the parameters
      */
-    inline void set_parameters(std::vector<double> params, bool check_sz=true){};
+    inline void set_parameters(const std::vector<double> params, const bool check_sz=true){};
 
     /**
      * @brief returns the number of parameters for this feature
@@ -345,7 +349,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the number of parameters (_params.size())
      */
-    inline int n_params(int n_cur=0, int depth = 1){return n_cur;};
+    inline int n_params(int n_cur=0, int depth = 1) const {return n_cur;};
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -355,7 +359,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the current depth of the node on the Binary expression tree
      */
-    inline void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0){set_value(offset);};
+    inline void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const {set_value(offset);};
 
     /**
      * @brief The pointer to where the feature's training data is stored
@@ -366,7 +370,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @returns the pointer to the feature's data
      */
-    inline double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0){return value_ptr(offset);};
+    inline double* value_ptr(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const {return value_ptr(offset);};
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -376,7 +380,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the current depth of the node on the Binary expression tree
      */
-    inline void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0){set_test_value(offset);};
+    inline void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const {set_test_value(offset);};
 
     /**
      * @brief The pointer to where the feature's test data is stored
@@ -387,7 +391,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @returns the pointer to the feature's data
      */
-    inline double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0){return test_value_ptr(offset);};
+    inline double* test_value_ptr(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const {return test_value_ptr(offset);};
 
     /**
      * @brief The expression of the feature
@@ -396,7 +400,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth = 1){return _expr;};
+    inline std::string expr(const double* params, const int depth = 1) const {return _expr;};
 
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
@@ -405,7 +409,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1){return str_utils::latexify(_expr);}
+    inline std::string get_latex_expr(const double* params, const int depth=1) const {return str_utils::latexify(_expr);}
 
     /**
      * @brief Set the bounds for the nl parameterization
@@ -415,7 +419,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1){};
+    inline void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth = 1) const {};
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -423,7 +427,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    void param_derivative(const double* params, double* dfdp){}
+    void param_derivative(const double* params, double* dfdp) const {}
 
     /**
      * @brief Get the parameter gradient for non-linear optimization
@@ -431,7 +435,7 @@ public:
      * @param grad pointer to the gradient storage
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    virtual void gradient(double* grad, double* dfdp)
+    virtual void gradient(double* grad, double* dfdp) const
     {
         throw std::logic_error("Asking for the gradient of non-parameterized feature");
     }
@@ -443,7 +447,7 @@ public:
      * @param dfdp pointer to where the feature derivative pointers are located
      * @param params pointer to the parameters vector
      */
-    inline void gradient(double* grad, double* dfdp, const double* params){};
+    inline void gradient(double* grad, double* dfdp, const double* params) const {};
     #endif
 };
 
diff --git a/src/feature_creation/node/ModelNode.cpp b/src/feature_creation/node/ModelNode.cpp
index c15869bcf6c02f3360bd44e25bb26d4be545cbaf..d6c20df9b4f06d92ea5b17a17d9712c5252e5384 100644
--- a/src/feature_creation/node/ModelNode.cpp
+++ b/src/feature_creation/node/ModelNode.cpp
@@ -4,15 +4,15 @@ ModelNode::ModelNode()
 {}
 
 ModelNode::ModelNode(
-    unsigned long int feat_ind,
-    unsigned long int rung,
-    std::string expr,
-    std::string latex_expr,
-    std::string expr_postfix,
-    std::vector<double> value,
-    std::vector<double> test_value,
-    std::vector<std::string> x_in_expr_list,
-    Unit unit
+    const unsigned long int feat_ind,
+    const unsigned long int rung,
+    const std::string expr,
+    const std::string latex_expr,
+    const std::string expr_postfix,
+    const std::vector<double> value,
+    const std::vector<double> test_value,
+    const std::vector<std::string> x_in_expr_list,
+    const Unit unit
 ) :
     FeatureNode(feat_ind, expr, value, test_value, unit, false),
     _value_svm(_n_samp),
@@ -980,7 +980,7 @@ std::vector<double> ModelNode::eval(std::map<std::string, std::vector<double>> x
 ModelNode::~ModelNode()
 {}
 
-void ModelNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void ModelNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     if(add_sub_leaves.count(_expr) > 0)
     {
@@ -994,7 +994,7 @@ void ModelNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves
     ++expected_abs_tot;
 }
 
-void ModelNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void ModelNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     if(div_mult_leaves.count(_expr) > 0)
     {
@@ -1008,7 +1008,7 @@ void ModelNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_l
     expected_abs_tot += std::abs(fact);
 }
 
-std::map<int, int> ModelNode::primary_feature_decomp()
+std::map<int, int> ModelNode::primary_feature_decomp() const
 {
     std::map<int, int> pf_decomp;
     std::vector<std::string> split_postfix = str_utils::split_string_trim(_expr_postfix, "|");
@@ -1034,7 +1034,7 @@ std::map<int, int> ModelNode::primary_feature_decomp()
     return pf_decomp;
 }
 
-void ModelNode::update_primary_feature_decomp(std::map<int, int>& pf_decomp)
+void ModelNode::update_primary_feature_decomp(std::map<int, int>& pf_decomp) const
 {
     pf_decomp.clear();
     std::vector<std::string> split_postfix = str_utils::split_string_trim(_expr_postfix, "|");
diff --git a/src/feature_creation/node/ModelNode.hpp b/src/feature_creation/node/ModelNode.hpp
index 805b69d5bb1c43393bf48aebe3654a519ac18f8a..b70d0c3f945fb06989182f2b2d22585e057d1800 100644
--- a/src/feature_creation/node/ModelNode.hpp
+++ b/src/feature_creation/node/ModelNode.hpp
@@ -79,15 +79,15 @@ public:
      * @param unit Unit of the feature
      */
     ModelNode(
-        unsigned long int feat_ind,
-        unsigned long int rung,
-        std::string expr,
-        std::string latex_expr,
-        std::string expr_postfix,
-        std::vector<double> value,
-        std::vector<double> test_value,
-        std::vector<std::string> x_in_expr_list,
-        Unit unit
+        const unsigned long int feat_ind,
+        const unsigned long int rung,
+        const std::string expr,
+        const std::string latex_expr,
+        const std::string expr_postfix,
+        const std::vector<double> value,
+        const std::vector<double> test_value,
+        const std::vector<std::string> x_in_expr_list,
+        const Unit unit
     );
 
     /**
@@ -186,7 +186,7 @@ public:
      * @brief get the list of feature expressions for each of the leaves
      * @return Vector containing the expression for each leaf in the binary expression tree
      */
-    inline std::vector<std::string> get_x_in_expr_list(){return _x_in_expr_list;}
+    inline std::vector<std::string> get_x_in_expr_list() const {return _x_in_expr_list;}
 
     /**
      * @brief return the number of leaves in the feature
@@ -194,12 +194,12 @@ public:
      * @param cur_n_leaves The current number of primary features that are inside the feature
      * @return total number of leaves of the binary expression tree
      */
-    inline int n_leaves(int n_cur_leaves = 0){return _n_leaves;}
+    inline int n_leaves(int n_cur_leaves = 0) const {return _n_leaves;}
 
     /**
      * @return Value of the feature converted to a range of -1.0 to 1.0
      */
-    inline std::vector<double> svm_value(){return _value_svm;}
+    inline std::vector<double> svm_value() const {return _value_svm;}
 
     /**
      * @return pointer to the start of the vector storing the value of the feature transformed into a range of -1.0 to 1.0
@@ -209,7 +209,7 @@ public:
     /**
      * @return Value of the test value fo the feature converted to a range of -1.0 to 1.0
      */
-    inline std::vector<double> svm_test_value(){return _test_value_svm;}
+    inline std::vector<double> svm_test_value() const {return _test_value_svm;}
 
     /**
      * @return Pointer to the start of the vector storing the test value of the feature converted to a range of -1.0 to 1.0
@@ -219,12 +219,12 @@ public:
     /**
      * @return value used to map the b value from SVM from -1.0 to 1.0 range to the real one
      */
-    inline double remap_b_svm(double w){return w * _b_remap_svm;}
+    inline double remap_b_svm(double w) const {return w * _b_remap_svm;}
 
     /**
      * @return value used to map the w value from SVM from -1.0 to 1.0 range to the real one
      */
-    inline double remap_w_svm(double w){return w * _w_remap_svm;}
+    inline double remap_w_svm(double w) const {return w * _w_remap_svm;}
 
     // DocString: model_node_set_value
     /**
@@ -233,7 +233,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){return;}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {return;}
 
     // DocString: model_node_set_test_value
     /**
@@ -242,24 +242,24 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){return;}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {return;}
 
     // DocString: model_node_is_nan
     /**
      * @brief Check if the feature contains NaN
      */
-    inline bool is_nan(){return false;}
+    inline bool is_nan() const {return false;}
 
     // DocString: model_node_is_const
     /**
      * @brief Check if feature is constant
      */
-    inline bool is_const(){return false;}
+    inline bool is_const() const {return false;}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::MODEL_FEATURE;}
+    inline NODE_TYPE type() const {return NODE_TYPE::MODEL_FEATURE;}
 
     /**
      * @brief The pointer to where the feature's training data is stored
@@ -267,7 +267,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return _value.data();}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false){return _value.data();}
 
     /**
      * @brief The pointer to where the feature's test data is stored
@@ -275,7 +275,31 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return _test_value.data();}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false){return _test_value.data();}
+
+    /**
+     * @brief Const overload of value_ptr; always throws std::logic_error
+     *
+     * @param offset(int) unused; present only to mirror the non-const interface
+     * @param for_comp(bool) unused; present only to mirror the non-const interface
+     */
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const
+    {
+        throw std::logic_error("const version of value_ptr for ModelNode is impossible.");
+        return nullptr;
+    }
+
+    /**
+     * @brief Const overload of test_value_ptr; always throws std::logic_error
+     *
+     * @param offset(int) unused; present only to mirror the non-const interface
+     * @param for_comp(bool) unused; present only to mirror the non-const interface
+     */
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const
+    {
+        throw std::logic_error("const version of test_value_ptr for ModelNode is impossible.");
+        return nullptr;
+    }
 
     // DocString: model_node_rung
     /**
@@ -283,32 +307,32 @@ public:
      *
      * @param cur_rung(int) The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return _rung;}
+    inline int rung(int cur_rung = 0) const {return _rung;}
 
     /**
      * @brief Update the primary feature decomposition of a feature
      *
      * @param pf_decomp The primary feature decomposition of the feature calling this function.
      */
-    void update_primary_feature_decomp(std::map<int, int>& pf_decomp);
+    void update_primary_feature_decomp(std::map<int, int>& pf_decomp) const;
 
     /**
      * @brief Get the primary feature decomposition of a feature
      * @return A map representing the primary feature comprising a feature
      */
-    std::map<int, int> primary_feature_decomp();
+    std::map<int, int> primary_feature_decomp() const;
 
     /**
      * @brief Get the three character representation of the operator
      * @return the three character representation of the operator
      */
-    inline std::string get_postfix_term(){return _expr_postfix;}
+    inline std::string get_postfix_term() const {return _expr_postfix;}
 
     // DocString: model_node_latex_expr
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return _latex_expr.substr(1, _latex_expr.size() - 2);}
+    inline std::string get_latex_expr() const {return _latex_expr.substr(1, _latex_expr.size() - 2);}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -317,7 +341,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -327,7 +351,7 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PY_BINDINGS
     /**
diff --git a/src/feature_creation/node/Node.cpp b/src/feature_creation/node/Node.cpp
index 39d9bc596db207dbe9ab498132f07a09744ef8ae..663e3122d4be3d0b3efbd7004c2fce2591d48db0 100644
--- a/src/feature_creation/node/Node.cpp
+++ b/src/feature_creation/node/Node.cpp
@@ -3,7 +3,7 @@
 Node::Node()
 {}
 
-Node::Node(unsigned long int feat_ind, int n_samp, int n_test_samp) :
+Node::Node(const unsigned long int feat_ind, const int n_samp, const int n_test_samp) :
     _n_test_samp(n_test_samp),
     _n_samp(n_samp),
     _feat_ind(feat_ind),
@@ -15,15 +15,5 @@ Node::Node(unsigned long int feat_ind, int n_samp, int n_test_samp) :
 Node::~Node()
 {}
 
-void Node::set_standardized_value(int offset)
-{
-    set_value(offset);
-
-    double mean = util_funcs::mean(value_ptr(offset), _n_samp);
-    double stand_dev = util_funcs::stand_dev(value_ptr(offset), _n_samp, mean);
-
-    std::transform(value_ptr(offset), value_ptr(offset) + _n_samp, value_ptr(offset), [=](double vv){return (vv - mean) / stand_dev;});
-}
-
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(Node)
 
diff --git a/src/feature_creation/node/Node.hpp b/src/feature_creation/node/Node.hpp
index 865259f19ff37c0105f687ee900f414b9cd76fdc..fb1e7f0632d75e55e185c1a537de95a0ec2093cb 100644
--- a/src/feature_creation/node/Node.hpp
+++ b/src/feature_creation/node/Node.hpp
@@ -78,7 +78,7 @@ public:
      * @param n_samp number of samples in the node
      * @param n_samp number of test samples in the node
      */
-    Node(unsigned long int feat_ind, int n_samp, int n_test_samp);
+    Node(const unsigned long int feat_ind, const int n_samp, int n_test_samp);
 
     /**
      * @brief Copy Constructor
@@ -117,7 +117,7 @@ public:
      * @brief Get the list of feature expressions
      * @return vector storing the expressions for all primary features that show up in feature in the order they appear in the postfix notation
      */
-    virtual std::vector<std::string> get_x_in_expr_list() = 0;
+    virtual std::vector<std::string> get_x_in_expr_list() const = 0;
 
     /**
      * @brief return the number of leaves in the feature
@@ -125,7 +125,7 @@ public:
      * @param cur_n_leaves The current number of primary features that are inside the feature
      * @return total number of leaves of the binary expression tree
      */
-    virtual int n_leaves(int cur_n_leaves = 0) = 0;
+    virtual int n_leaves(const int cur_n_leaves = 0) const = 0;
 
     // DocString: node_reindex_1
     /**
@@ -134,54 +134,53 @@ public:
      *
      * @param ind(int) the new feature and array index
      */
-    inline void reindex(unsigned long int ind){_feat_ind = ind; _arr_ind = ind;}
+    inline void reindex(const unsigned long int ind){_feat_ind = ind; _arr_ind = ind;}
 
     // DocString: node_reindex_2
     /**
      * @brief Reindex the feature
      * @details re-index the feature to be continuous
      *
-     * @param ind(int) the new feature index
-     * @param arr_ind(int) the new array index
+     * @param feat_ind(int) the new feature index; @param arr_ind(int) the new array index
      */
-    inline void reindex(unsigned long int ind, unsigned long int arr_ind){_feat_ind = ind; _arr_ind = arr_ind;}
+    inline void reindex(const unsigned long int feat_ind, const unsigned long int arr_ind){_feat_ind = feat_ind; _arr_ind = arr_ind;}
 
     // DocString: node_samp
     /**
      * @brief The number of samples
      */
-    inline int n_samp(){return _n_samp;}
+    inline int n_samp() const {return _n_samp;}
 
     // DocString: node_test_samp
     /**
      * @brief The number of samples in the test set
      */
-    inline int n_test_samp(){return _n_test_samp;}
+    inline int n_test_samp() const {return _n_test_samp;}
 
     // DocString: node_feat_ind
     /**
      * @brief The feature index
      */
-    inline unsigned long int feat_ind(){return _feat_ind;}
+    inline unsigned long int feat_ind() const {return _feat_ind;}
 
     // DocString: node_arr_ind
     /**
      * @brief The feature array index
      */
-    inline unsigned long int arr_ind(){return _arr_ind;}
+    inline unsigned long int arr_ind() const {return _arr_ind;}
 
     // DocString: node_selected
     /**
      * @brief True if feature is selected
      */
-    inline bool selected(){return _selected;}
+    inline bool selected() const {return _selected;}
 
     /**
      * @brief Setter function for _selected
      *
      * @param sel is the feature selected?
      */
-    inline void set_selected(bool sel){_selected = sel;}
+    inline void set_selected(const bool sel){_selected = sel;}
 
     // DocString: node_d_mat_ind
     /**
@@ -189,46 +188,46 @@ public:
      *
      * @param ind new _d_mat_ind
      */
-    inline void set_d_mat_ind(int ind){_d_mat_ind = ind;}
+    inline void set_d_mat_ind(const int ind){_d_mat_ind = ind;}
 
     /**
      * @brief The descriptor matrix index
      */
-    inline int d_mat_ind(){return _d_mat_ind;}
+    inline int d_mat_ind() const{return _d_mat_ind;}
 
     // DocString: node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    virtual std::string expr() = 0;
+    virtual std::string expr() const = 0;
 
     // DocString: node_latex_expr
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
      */
-    virtual std::string get_latex_expr() = 0;
+    virtual std::string get_latex_expr() const = 0;
 
     // DocString: node_latex_expr
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
      */
-    inline std::string latex_expr(){return "$" + get_latex_expr() + "$";}
+    inline std::string latex_expr() const {return "$" + get_latex_expr() + "$";}
 
     // DocString: node_unit
     /**
      * @brief Get the unit for the overall feature (From root node down)
      */
-    virtual Unit unit() = 0;
+    virtual Unit unit() const = 0;
 
     /**
      * @brief Get the training data of the feature
      */
-    virtual std::vector<double> value() = 0;
+    virtual std::vector<double> value() const = 0;
 
     /**
      * @brief Get the test data for the feature
      */
-    virtual std::vector<double> test_value() = 0;
+    virtual std::vector<double> test_value() const = 0;
 
 
     // DocString: node_set_value
@@ -238,14 +237,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false) = 0;
-
-    /**
-     * @brief Set the values of the training data for the feature inside of the value storage arrays (standardized so mean = 0 and stand_dev = 1)
-     *
-     * @param offset(int) Key to determine which part of the temporary storage array to look into
-     */
-    void set_standardized_value(int offset = -1);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const = 0;
 
     /**
      * @brief The pointer to where the feature's training data is stored
@@ -253,7 +245,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual double* value_ptr(int offset=-1, bool for_comp=false) = 0;
+    virtual double* value_ptr(int offset=-1, const bool for_comp=false) const = 0;
 
     // DocString: node_set_test_value
     /**
@@ -262,7 +254,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false) = 0;
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const = 0;
 
     /**
      * @brief The pointer to where the feature's test data is stored
@@ -270,24 +262,24 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual double* test_value_ptr(int offset=-1, bool for_comp=false) = 0;
+    virtual double* test_value_ptr(int offset=-1, const bool for_comp=false) const = 0;
 
     // DocString: node_is_nan
     /**
      * @brief Check if the feature contains NaN
      */
-    virtual bool is_nan() = 0;
+    virtual bool is_nan() const = 0;
 
     // DocString: node_is_const
     /**
      * @brief Check if feature is constant
      */
-    virtual bool is_const() = 0;
+    virtual bool is_const() const = 0;
 
     /**
      * @brief Returns the type of node this is
      */
-    virtual NODE_TYPE type() = 0;
+    virtual NODE_TYPE type() const = 0;
 
     // DocString: node_rung
     /**
@@ -295,20 +287,20 @@ public:
      *
      * @param cur_rung(int) The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    virtual int rung(int cur_rung = 0) = 0;
+    virtual int rung(const int cur_rung = 0) const = 0;
 
     /**
      * @brief Get the primary feature decomposition of a feature
      * @return A map representing the primary feature comprising a feature
      */
-    virtual std::map<int, int> primary_feature_decomp() = 0;
+    virtual std::map<int, int> primary_feature_decomp() const = 0;
 
     /**
      * @brief Update the primary feature decomposition of a feature
      *
      * @param pf_decomp The primary feature decomposition of the feature calling this function.
      */
-    virtual void update_primary_feature_decomp(std::map<int, int>& pf_decomp) = 0;
+    virtual void update_primary_feature_decomp(std::map<int, int>& pf_decomp) const = 0;
 
     /**
      * @brief Converts a feature into a postfix expression (reverse polish notation)
@@ -319,20 +311,20 @@ public:
      * @param add_params If true include the parameters in teh postfix expression
      * @return The current postfix expression of the feature
      */
-    virtual void update_postfix(std::string& cur_expr, bool add_params=true) = 0;
+    virtual void update_postfix(std::string& cur_expr, const bool add_params=true) const = 0;
 
     // DocString: node_postfix_expr
     /**
      * @brief Get the postfix expression for the feature
      * @return The postfix string for the expression
      */
-    inline std::string postfix_expr(){std::string cur_expr = ""; update_postfix(cur_expr); return cur_expr.substr(0, cur_expr.size() - 1);}
+    inline std::string postfix_expr() const {std::string cur_expr = ""; update_postfix(cur_expr); return cur_expr.substr(0, cur_expr.size() - 1);}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    virtual std::string get_postfix_term() = 0;
+    virtual std::string get_postfix_term() const = 0;
 
     /**
      * @brief update the dictionary used to check if an Add/Sub/AbsDiff node is valid
@@ -341,7 +333,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    virtual void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot) = 0;
+    virtual void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const = 0;
 
     /**
      * @brief update the dictionary used to check if an Mult/Div node is valid
@@ -350,21 +342,21 @@ public:
      * @param fact amount to increment the dictionary by
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      */
-    virtual void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot) = 0;
+    virtual void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const = 0;
 
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters() = 0;
+    virtual std::vector<double> parameters() const = 0;
 
     /**
      * @brief Set the non-linear parameters
      * @param params The new parameters of the node
      * @param check_sz If true check the size of the parameters
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true) = 0;
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true) = 0;
 
     /**
      * @brief returns the number of parameters for this feature
@@ -373,7 +365,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the number of parameters (_params.size())
      */
-    virtual int n_params(int n_cur = 0, int depth = 1) = 0;
+    virtual int n_params(const int n_cur = 0, const int depth = 1) const = 0;
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -383,7 +375,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the current depth of the node in the binary expression tree (from the root)
      */
-    virtual void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+    virtual void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const = 0;
 
     /**
      * @brief The pointer to where the feature's training data is stored
@@ -394,7 +386,7 @@ public:
      * @param depth the current depth of the node in the binary expression tree (from the root)
      * @returns the pointer to the feature's data
      */
-    virtual double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+    virtual double* value_ptr(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const = 0;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -404,7 +396,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the current depth of the node in the binary expression tree (from the root)
      */
-    virtual void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+    virtual void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const = 0;
 
     /**
      * @brief The pointer to where the feature's test data is stored
@@ -415,7 +407,7 @@ public:
      * @param depth the current depth of the node in the binary expression tree (from the root)
      * @returns the pointer to the feature's data
      */
-    virtual double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+    virtual double* test_value_ptr(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const = 0;
 
     /**
      * @brief The expression of the feature
@@ -424,7 +416,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    virtual std::string expr(double* params, int depth = 1) = 0;
+    virtual std::string expr(const double* params, const int depth = 1) const = 0;
 
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
@@ -433,7 +425,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    virtual std::string get_latex_expr(double* params, int depth=1) = 0;
+    virtual std::string get_latex_expr(const double* params, const int depth=1) const = 0;
 
     /**
      * @brief Set the bounds for the nl parameterization
@@ -443,7 +435,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1) = 0;
+    virtual void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth = 1) const = 0;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -451,7 +443,7 @@ public:
      * @param params pointer to the parameters
      * @param samp_ind sample index number
      */
-    virtual void param_derivative(const double* params, double* dfdp) = 0;
+    virtual void param_derivative(const double* params, double* dfdp) const = 0;
 
     /**
      * @brief Get the parameter gradient for non-linear optimization
@@ -459,7 +451,7 @@ public:
      * @param grad pointer to the gradient storage
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    virtual void gradient(double* grad, double* dfdp) = 0;
+    virtual void gradient(double* grad, double* dfdp) const = 0;
 
     /**
      * @brief Get the parameter gradient for non-linear optimization
@@ -468,7 +460,7 @@ public:
      * @param dfdp pointer to where the feature derivative pointers are located
      * @param params pointer to the parameters vector
      */
-    virtual void gradient(double* grad, double* dfdp, const double* params) = 0;
+    virtual void gradient(double* grad, double* dfdp, const double* params) const = 0;
     #endif
 
     //DocString: node_nfeats
@@ -476,7 +468,7 @@ public:
      * @brief Number of features used for an operator node
      * @return the number of features for an operator node
      */
-    virtual int n_feats() = 0;
+    virtual int n_feats() const = 0;
 
     //DocString: node_feat
     /**
@@ -485,7 +477,7 @@ public:
      * @param ind the index of the node to access
      * @return the ind feature in feature_list
      */
-    virtual std::shared_ptr<Node> feat(int ind) = 0;
+    virtual std::shared_ptr<Node> feat(const int ind) const = 0;
 
     #ifdef PY_BINDINGS
 
diff --git a/src/feature_creation/node/operator_nodes/OperatorNode.hpp b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
index d005aa2fa9b1c55d35d08498af14b33c3ff7aebb..b699f035f35f462da887b2bd6581f64fe2b92d29 100644
--- a/src/feature_creation/node/operator_nodes/OperatorNode.hpp
+++ b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
@@ -74,7 +74,7 @@ public:
      * @param feats array of features that the operator will act on
      * @param feat_ind index of the feature
      */
-    OperatorNode(std::array<node_ptr, N> feats, unsigned long int feat_ind) :
+    OperatorNode(const std::array<node_ptr, N> feats, const unsigned long int feat_ind) :
         Node(feat_ind, feats[0]->n_samp(), feats[0]->n_test_samp()),
         _feats(feats)
     {}
@@ -116,7 +116,7 @@ public:
      * @brief Get the list of feature expressions
      * @return vector storing the expressions for all primary features that show up in feature in the order they appear in the postfix notation
      */
-    std::vector<std::string> get_x_in_expr_list()
+    std::vector<std::string> get_x_in_expr_list() const
     {
         std::vector<std::string> x_in_expr;
         for(auto& feat: _feats)
@@ -134,7 +134,7 @@ public:
      * @param cur_n_leaves The current number of primary features that are inside the feature
      * @return total number of leaves of the binary expression tree
      */
-    int n_leaves(int cur_n_leaves = 0)
+    int n_leaves(int cur_n_leaves = 0) const
     {
         return std::accumulate(_feats.begin(), _feats.end(), cur_n_leaves, [](int tot, node_ptr feat){return tot + feat->n_leaves();});
     }
@@ -143,24 +143,24 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    virtual std::string expr() = 0;
+    virtual std::string expr() const = 0;
 
     // DocString: node_latex_expr
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
      */
-    virtual std::string get_latex_expr() = 0;
+    virtual std::string get_latex_expr() const = 0;
 
     // DocString: op_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    virtual Unit unit() = 0;
+    virtual Unit unit() const = 0;
 
     /**
      * @brief Get the training data of the feature
      */
-    std::vector<double> value()
+    std::vector<double> value() const
     {
         std::vector<double> val(_n_samp, 0.0);
         std::copy_n(value_ptr(), _n_samp, val.data());
@@ -170,7 +170,7 @@ public:
     /**
      * @brief Get the test data for the feature
      */
-    std::vector<double> test_value()
+    std::vector<double> test_value() const
     {
         std::vector<double> val(_n_test_samp, 0.0);
         std::copy_n(test_value_ptr(), _n_test_samp, val.data());
@@ -184,7 +184,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false) = 0;
+    virtual void set_value(int offset=-1, const bool for_comp=false) const = 0;
 
     // DocString: op_node_set_test_value
     /**
@@ -193,7 +193,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false) = 0;
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const = 0;
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -204,7 +204,7 @@ public:
      *
      * @return pointer to the feature's training value
      */
-    virtual double* value_ptr(int offset=-1, bool for_comp=false)
+    virtual double* value_ptr(int offset=-1, const bool for_comp=false) const
     {
         if(_selected && (offset == -1))
         {
@@ -229,7 +229,7 @@ public:
      * @return pointer to the feature's test values
      */
 
-    virtual double* test_value_ptr(int offset=-1, bool for_comp=false)
+    virtual double* test_value_ptr(int offset=-1, const bool for_comp=false) const
     {
         offset += (offset == -1);
         if((rung() > node_value_arrs::N_RUNGS_STORED) && (node_value_arrs::temp_storage_test_reg(_arr_ind, rung(), offset, for_comp) != _feat_ind))
@@ -244,7 +244,7 @@ public:
     /**
      * @brief Check if the feature contains NaN
      */
-    inline bool is_nan()
+    inline bool is_nan() const
     {
         double* val_ptr = value_ptr();
         return std::any_of(val_ptr, val_ptr + _n_samp, [](double d){return !std::isfinite(d);});
@@ -254,7 +254,7 @@ public:
     /**
      * @brief Check if feature is constant
      */
-    bool is_const()
+    bool is_const() const
     {
         double* val_ptr = value_ptr();
 
@@ -274,18 +274,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    virtual int rung(int cur_rung = 0) = 0;
+    virtual int rung(int cur_rung = 0) const = 0;
 
     /**
      * @brief Returns the type of node this is
      */
-    virtual NODE_TYPE type() = 0;
+    virtual NODE_TYPE type() const = 0;
 
      /**
      * @brief Get the primary feature decomposition of a feature
      * @return A map representing the primary feature comprising a feature
      */
-    std::map<int, int> primary_feature_decomp()
+    std::map<int, int> primary_feature_decomp() const
     {
         std::map<int, int> pf_decomp;
         update_primary_feature_decomp(pf_decomp);
@@ -297,7 +297,7 @@ public:
      *
      * @param pf_decomp The primary feature decomposition of the feature calling this function.
      */
-    void update_primary_feature_decomp(std::map<int, int>& pf_decomp)
+    void update_primary_feature_decomp(std::map<int, int>& pf_decomp) const
     {
         for(auto& feat : _feats)
         {
@@ -314,7 +314,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    virtual void update_postfix(std::string& cur_expr, bool add_params=true)
+    virtual void update_postfix(std::string& cur_expr, const bool add_params=true) const
     {
         std::stringstream postfix;
         postfix << get_postfix_term();
@@ -329,14 +329,14 @@ public:
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    virtual std::string get_postfix_term() = 0;
+    virtual std::string get_postfix_term() const = 0;
 
     //DocString: op_node_nfeats
      /**
      * @brief Number of features used for an operator node
      * @return the number of features for an operator node
      */
-    inline int n_feats(){return N;}
+    inline int n_feats() const {return N;}
 
     //DocString: op_node_feat
     /**
@@ -345,7 +345,7 @@ public:
      * @param ind the index of the node to access
      * @return the ind feature in feature_list
      */
-    inline node_ptr feat(int ind)
+    inline node_ptr feat(const int ind) const
     {
         if(ind > N)
         {
@@ -361,7 +361,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    virtual void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot) = 0;
+    virtual void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const = 0;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -371,13 +371,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    virtual void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot) = 0;
+    virtual void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const = 0;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters() = 0;
+    virtual std::vector<double> parameters() const = 0;
 
     //DocString: op_node_get_params
     /**
@@ -393,7 +393,7 @@ public:
      * @param params The new parameters to use for the feature
      * @param check_sz If true make sure the number of parameters matches the expected value
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true) = 0;
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true) = 0;
 
     // DocString: op_node_n_params
     /**
@@ -403,7 +403,7 @@ public:
      * @param depth the max depth of paramterization (in the binary expression tree)
      * @return the number of theoretical parameters
      */
-    virtual inline int n_params(int n_cur = 0, int depth = 1)
+    virtual inline int n_params(const int n_cur = 0, const int depth = 1) const
     {
         if(depth > nlopt_wrapper::MAX_PARAM_DEPTH)
         {
@@ -421,7 +421,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    virtual void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+    virtual void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const = 0;
 
     /**
      * @brief The pointer to where the feature's training data is stored
@@ -432,7 +432,7 @@ public:
      * @param depth the max depth of paramterization (in the binary expression tree)
      * @returns the pointer to the feature's data
      */
-    double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0)
+    double* value_ptr(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const
     {
         if(_selected && (offset == -1))
         {
@@ -452,7 +452,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    virtual void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+    virtual void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const = 0;
 
     /**
      * @brief The pointer to where the feature's test data is stored
@@ -463,7 +463,7 @@ public:
      * @param depth the max depth of paramterization (in the binary expression tree)
      * @returns the pointer to the feature's data
      */
-    double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0)
+    double* test_value_ptr(const double* params, int offset=-1, const bool for_comp=false, const int depth=0) const
     {
         offset += (offset == -1);
         set_test_value(params, offset, for_comp, depth);
@@ -478,7 +478,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    virtual std::string expr(double* params, int depth=1) = 0;
+    virtual std::string expr(const double* params, const int depth=1) const = 0;
 
     /**
      * @brief Get the latexified expression for the overall feature (From root node down)
@@ -486,7 +486,7 @@ public:
      * @param params parameter values for non-linear operations
      * @param depth the current depth of the node on the Binary expression tree
      */
-    virtual std::string get_latex_expr(double* params, int depth=1) = 0;
+    virtual std::string get_latex_expr(const double* params, const int depth=1) const = 0;
 
     /**
      * @brief Set the bounds for the nl parameterization
@@ -496,7 +496,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1) = 0;
+    virtual void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth = 1) const = 0;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -504,7 +504,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    virtual void param_derivative(const double* params, double* dfdp) = 0;
+    virtual void param_derivative(const double* params, double* dfdp) const = 0;
 
     /**
      * @brief Get the parameter gradient for non-linear optimization
@@ -512,7 +512,7 @@ public:
      * @param grad pointer to the gradient storage
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    void gradient(double* grad, double* dfdp)
+    void gradient(double* grad, double* dfdp) const
     {
         if(n_params() == 0)
         {
@@ -531,7 +531,7 @@ public:
      * @param dfdp pointer to where the feature derivative pointers are located
      * @param params pointer to the parameters vector
      */
-    void gradient(double* grad, double* dfdp, const double* params)
+    void gradient(double* grad, double* dfdp, const double* params) const
     {
         int np = n_params();
         param_derivative(params, dfdp);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp
index 6fce82413cb6022495cd22d2e1201feb011cde10..b1f9cd7888f68dbfbfa2a254c93dfc246556e885 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp
@@ -1,7 +1,13 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp>
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsNode)
 
-void generateAbsNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateAbsNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // Absolute value of an absolute value is the same thing
@@ -37,11 +43,11 @@ void generateAbsNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 AbsNode::AbsNode()
 {}
 
-AbsNode::AbsNode(node_ptr feat, unsigned long int feat_ind):
+AbsNode::AbsNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-AbsNode::AbsNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+AbsNode::AbsNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // Absolute value of an absolute value is the same thing
@@ -71,7 +77,7 @@ AbsNode::AbsNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     }
 }
 
-void AbsNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void AbsNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -86,7 +92,7 @@ void AbsNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void AbsNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void AbsNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -101,7 +107,7 @@ void AbsNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void AbsNode::set_value(int offset, bool for_comp)
+void AbsNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -118,7 +124,7 @@ void AbsNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::abs(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void AbsNode::set_test_value(int offset, bool for_comp)
+void AbsNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::abs(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
index fa367dd5934725b23c3377222d489b95ad797fe2..299260c6ecd4f6f68e963862eb0abf21d18ee1fc 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    AbsNode(node_ptr feat, unsigned long int feat_ind);
+    AbsNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AbsNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    AbsNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: abs_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit();}
+    inline Unit unit() const {return _feats[0]->unit();}
 
     // DocString: abs_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "(|{}|)",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\left|{}\\right|\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: abs_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: abs_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::ABS;}
+    inline NODE_TYPE type() const {return NODE_TYPE::ABS;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "abs";}
+    inline std::string get_postfix_term() const {return "abs";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -192,7 +192,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "|{:.10e}*{}{:+15.10e}|",
@@ -209,7 +209,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\left|{:.3e}{}{:+8.3e}\\right|\\right)",
@@ -227,7 +227,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    virtual void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -235,7 +235,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return util_funcs::sign(params[0] * vp + params[1]);});
@@ -252,6 +252,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateAbsNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateAbsNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
index 7ca22cdbc4922ab1033f5bf65183b0e857bb80fe..ee580b02aad6bca7c48fafcb72dd82693c5a085d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
@@ -3,7 +3,12 @@
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsParamNode)
 
 void generateAbsParamNode(
-    std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
 )
 {
     ++feat_ind;
@@ -33,7 +38,9 @@ void generateAbsParamNode(
 AbsParamNode::AbsParamNode()
 {}
 
-AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+AbsParamNode::AbsParamNode(
+    const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound, std::shared_ptr<NLOptimizer> optimizer
+) :
     AbsNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -53,7 +60,7 @@ AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     }
 }
 
-AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+AbsParamNode::AbsParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     AbsNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -61,7 +68,7 @@ AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, std::share
     get_parameters(optimizer);
 }
 
-AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+AbsParamNode::AbsParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     AbsNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -88,7 +95,7 @@ void AbsParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void AbsNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void AbsNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -115,7 +122,8 @@ void AbsNode::set_value(const double* params, int offset, bool for_comp, int dep
 
     allowed_op_funcs::abs(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
-void AbsNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+
+void AbsNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -134,7 +142,7 @@ void AbsNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void AbsNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -146,7 +154,7 @@ void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2, ub + 2);
 }
 
-void AbsParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void AbsParamNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
@@ -158,3 +166,19 @@ void AbsParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void AbsParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
index 5ce76d08c49e32afcbe692258e89a1b80df86a8a..06088ada35b44ef5e2c6e85278d136f78db905d9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
@@ -62,7 +62,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    AbsParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    AbsParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -72,7 +78,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    AbsParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    AbsParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -83,7 +89,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AbsParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    AbsParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: abs_param_node_set_value
     /**
@@ -92,7 +98,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: abs_param_node_set_test_value
     /**
@@ -101,7 +107,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -112,7 +118,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -123,24 +129,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: abs_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: abs_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -173,7 +179,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Converts a feature into a postfix expression (reverse polish notation)
@@ -184,21 +190,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
@@ -212,7 +204,12 @@ public:
  * @param optimizer The optimizer used to find the parameters of the node
  */
 void generateAbsParamNode(
-    std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
 );
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp
index 71f41ac3ad063380d9b9637571b0e7731615d182..9b1238ab0cb0c91737f8951702c0144b48757e4a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp
@@ -1,7 +1,7 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp>
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp>
 
-void generateAbsDiffNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateAbsDiffNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound)
 {
     ++feat_ind;
     // If the units do not match up the operation is invalid
@@ -61,11 +61,11 @@ void generateAbsDiffNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node
 AbsDiffNode::AbsDiffNode()
 {}
 
-AbsDiffNode::AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind):
+AbsDiffNode::AbsDiffNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {}
 
-AbsDiffNode::AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+AbsDiffNode::AbsDiffNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     // If the units do not match up the operation is invalid
@@ -124,7 +124,7 @@ AbsDiffNode::AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int fea
     }
 }
 
-void AbsDiffNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void AbsDiffNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -139,7 +139,7 @@ void AbsDiffNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leav
     ++expected_abs_tot;
 }
 
-void AbsDiffNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void AbsDiffNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -154,7 +154,7 @@ void AbsDiffNode::update_div_mult_leaves(std::map<std::string, double>& div_mult
     expected_abs_tot += std::abs(fact);
 }
 
-void AbsDiffNode::set_value(int offset, bool for_comp)
+void AbsDiffNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -171,7 +171,7 @@ void AbsDiffNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::abs_diff(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), _feats[1]->value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void AbsDiffNode::set_test_value(int offset, bool for_comp)
+void AbsDiffNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::abs_diff(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
index da79e12e5909ce10b7c4e4a17953f66e89d7dd8a..daf992eeee54a60b333e3da1671af756fa20042d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
@@ -49,7 +49,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind);
+    AbsDiffNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -61,19 +61,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound);
+    AbsDiffNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: abs_diff_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit();}
+    inline Unit unit() const {return _feats[0]->unit();}
 
     // DocString: abs_diff_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "(|{} - {}|)",
@@ -86,7 +86,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\left|{} - {}\\right|\\right)",
@@ -102,7 +102,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: abs_diff_node_set_test_value
     /**
@@ -111,7 +111,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: abs_diff_node_rung
     /**
@@ -119,18 +119,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
+    inline int rung(const int cur_rung=0) const {return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::ABS_DIFF;}
+    inline NODE_TYPE type() const {return NODE_TYPE::ABS_DIFF;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "abd";}
+    inline std::string get_postfix_term() const {return "abd";}
 
     /**
      * @brief Check if the feature will be valid, if it is then set the value
@@ -145,7 +145,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -155,13 +155,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -176,7 +176,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -186,7 +186,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -196,7 +196,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -205,7 +205,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "|{} - ({:.10e}*{}{:+15.10e})|",
@@ -223,7 +223,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\left|{} - \\left({:.3e}{}{:+8.3e}\\right)\\right|\\right)",
@@ -242,7 +242,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -250,7 +250,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr_1 = _feats[0]->value_ptr(params, 2);
         double* val_ptr_2 = _feats[1]->value_ptr(params, 1);
@@ -275,6 +275,6 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateAbsDiffNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateAbsDiffNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
index 346864074d1b5c059b9e4b6fbce0ba1b62203b87..eedcc449635eee619f78d6398fff67f0b6b59a39 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
@@ -4,11 +4,11 @@ BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsDiffParamNode)
 
 void generateAbsDiffParamNode(
     std::vector<node_ptr>& feat_list,
-    node_ptr feat_1,
-    node_ptr feat_2,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
     unsigned long int& feat_ind,
-    double l_bound,
-    double u_bound,
+    const double l_bound,
+    const double u_bound,
     std::shared_ptr<NLOptimizer> optimizer
 )
 {
@@ -59,7 +59,12 @@ AbsDiffParamNode::AbsDiffParamNode()
 {}
 
 AbsDiffParamNode::AbsDiffParamNode(
-    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
 ):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
@@ -98,14 +103,14 @@ AbsDiffParamNode::AbsDiffParamNode(
     }
 }
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
+AbsDiffParamNode::AbsDiffParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+AbsDiffParamNode::AbsDiffParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -120,7 +125,7 @@ void AbsDiffParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void AbsDiffNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void AbsDiffNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -158,7 +163,7 @@ void AbsDiffNode::set_value(const double* params, int offset, bool for_comp, int
     allowed_op_funcs::abs_diff(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
 
-void AbsDiffNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void AbsDiffNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -187,7 +192,7 @@ void AbsDiffNode::set_test_value(const double* params, int offset, bool for_comp
     );
 }
 
-void AbsDiffNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void AbsDiffNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
     {
@@ -197,3 +202,20 @@ void AbsDiffNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void AbsDiffParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[1]->update_postfix(cur_expr, false);
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
index 17a2db6209043ec7370a28afe96469001fb2d35d..961355ac9ec792bc60f1e76623ef6247feff1b3c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
@@ -59,7 +59,14 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    AbsDiffParamNode(
+        const node_ptr feat_1,
+        const node_ptr feat_2,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +77,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    AbsDiffParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -82,7 +89,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    AbsDiffParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: abs_diff_param_node_set_value
     /**
@@ -91,7 +98,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: abs_diff_param_node_set_test_value
     /**
@@ -100,7 +107,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -111,7 +118,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -122,24 +129,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: abs_diff_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: abs_diff_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -173,23 +180,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[1]->update_postfix(cur_expr, false);
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
@@ -205,11 +196,11 @@ public:
  */
 void generateAbsDiffParamNode(
     std::vector<node_ptr>& feat_list,
-    node_ptr feat_1,
-    node_ptr feat_2,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
     unsigned long int& feat_ind,
-    double l_bound,
-    double u_bound,
+    const double l_bound,
+    const double u_bound,
     std::shared_ptr<NLOptimizer> optimizer
 );
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp
index 5200e3ae6d83818464eb5bb7143accbf81679eec..2e3246ae0438458977eb7a7b200aa012c910f89e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp
@@ -1,6 +1,6 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp>
 
-void generateAddNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateAddNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound)
 {
     ++feat_ind;
     // If the input features are not of the same unit this operation is invalid
@@ -52,11 +52,11 @@ void generateAddNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr
 AddNode::AddNode()
 {}
 
-AddNode::AddNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind):
+AddNode::AddNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {}
 
-AddNode::AddNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+AddNode::AddNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     // If the input features are not of the same unit this operation is invalid
@@ -106,13 +106,13 @@ AddNode::AddNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, d
     }
 }
 
-void AddNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void AddNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     _feats[0]->update_add_sub_leaves(add_sub_leaves, pl_mn, expected_abs_tot);
     _feats[1]->update_add_sub_leaves(add_sub_leaves, pl_mn, expected_abs_tot);
 }
 
-void AddNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void AddNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -127,7 +127,7 @@ void AddNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void AddNode::set_value(int offset, bool for_comp)
+void AddNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -144,7 +144,7 @@ void AddNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::add(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), _feats[1]->value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void AddNode::set_test_value(int offset, bool for_comp)
+void AddNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::add(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
index 17d8f2e9a3d95b73ee471056ed95477efd9832c2..627121af7378da7855483babca680f03f326e319 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
@@ -46,7 +46,7 @@ public:
      * @param feat_2 shared_ptr of the second feature to operate on (B)
      * @param feat_ind Index of the new feature
      */
-    AddNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind);
+    AddNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -58,19 +58,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AddNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound);
+    AddNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: add_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit();}
+    inline Unit unit() const {return _feats[0]->unit();}
 
     // DocString: add_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({} + {})",
@@ -83,7 +83,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left({} + {}\\right)]",
@@ -99,7 +99,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: add_node_set_test_value
     /**
@@ -107,7 +107,7 @@ public:
      *
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: add_node_rung
     /**
@@ -115,18 +115,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
+    inline int rung(const int cur_rung=0) const {return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::ADD;}
+    inline NODE_TYPE type() const {return NODE_TYPE::ADD;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "add";}
+    inline std::string get_postfix_term() const {return "add";}
 
     /**
      * @brief Check if the feature will be valid, if it is then set the value
@@ -141,7 +141,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -151,13 +151,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,7 +172,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -182,7 +182,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -192,7 +192,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -201,7 +201,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "({} + {:.10e}*{}{:+15.10e})",
@@ -219,7 +219,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left({} + {:.3}*{}{:+8.3e}\\right)",
@@ -238,7 +238,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -246,7 +246,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp){std::fill_n(dfdp,  _n_samp, 1.0);}
+    inline void param_derivative(const double* params, double* dfdp) const {std::fill_n(dfdp,  _n_samp, 1.0);}
     #endif
 };
 
@@ -260,6 +260,6 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateAddNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateAddNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
index e45be946d3f26e2232966a340c8f9b421b7ccb01..6d10e5568fb90eb22419fb06c9cd8afa41e29dd5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
@@ -2,7 +2,15 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AddParamNode)
 
-void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateAddParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<AddParamNode>(feat_1, feat_2, feat_ind, optimizer);
@@ -33,7 +41,12 @@ AddParamNode::AddParamNode()
 {}
 
 AddParamNode::AddParamNode(
-    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
 ) :
     AddNode(feat_1, feat_2, feat_ind)
 {
@@ -53,14 +66,14 @@ AddParamNode::AddParamNode(
     }
 }
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+AddParamNode::AddParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound) :
+AddParamNode::AddParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -75,7 +88,7 @@ void AddParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void AddNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void AddNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -113,7 +126,7 @@ void AddNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::add(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
 
-void AddNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void AddNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -147,7 +160,7 @@ void AddNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void AddNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void AddNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[1] = 0.0;
     ub[1] = 0.0;
@@ -160,3 +173,20 @@ void AddNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void AddParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[1]->update_postfix(cur_expr, false);
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
index f894680b4c60160dd5aa0575ee092b1baea9961a..e4846aa10b24b61f520781811537a96344a55e68 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
@@ -61,7 +61,14 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    AddParamNode(
+        const node_ptr feat_1,
+        const node_ptr feat_2,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -72,7 +79,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    AddParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -83,7 +90,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    AddParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: add_param_node_set_value
     /**
@@ -92,7 +99,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: add_param_node_set_test_value
     /**
@@ -101,7 +108,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -112,7 +119,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -123,24 +130,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: add_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: add_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -174,23 +181,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[1]->update_postfix(cur_expr, false);
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
@@ -206,11 +197,11 @@ public:
  */
 void generateAddParamNode(
     std::vector<node_ptr>& feat_list,
-    node_ptr feat_1,
-    node_ptr feat_2,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
     unsigned long int& feat_ind,
-    double l_bound,
-    double u_bound,
+    const double l_bound,
+    const double u_bound,
     std::shared_ptr<NLOptimizer> optimizer
 );
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp
index 5cb95ea089e3e2f520c0dddb3b51221b7ff59060..1ebbfb7c2ee16ae77a588882a577be8280d640b8 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp>
 
-void generateCbNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateCbNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // Prevent possible repeats by combining other power operations together
@@ -27,11 +33,11 @@ void generateCbNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned lo
 CbNode::CbNode()
 {}
 
-CbNode::CbNode(node_ptr feat, unsigned long int feat_ind):
+CbNode::CbNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-CbNode::CbNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+CbNode::CbNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // Prevent possible repeats by combining other power operations together
@@ -54,7 +60,7 @@ CbNode::CbNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double
 
 }
 
-void CbNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void CbNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -69,12 +75,12 @@ void CbNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, i
     ++expected_abs_tot;
 }
 
-void CbNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void CbNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact * 3.0, expected_abs_tot);
 }
 
-void CbNode::set_value(int offset, bool for_comp)
+void CbNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -90,7 +96,7 @@ void CbNode::set_value(int offset, bool for_comp)
 
     allowed_op_funcs::cb(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
-void CbNode::set_test_value(int offset, bool for_comp)
+void CbNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::cb(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
index 3f995e7e6b5d13728eb2e184b8c6715c31f909f6..d1eb8fd0a905aca529f25588aca2db55c65facac 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    CbNode(node_ptr feat, unsigned long int feat_ind);
+    CbNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    CbNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    CbNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: cb_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit()^(3.0);}
+    inline Unit unit() const {return _feats[0]->unit()^(3.0);}
 
     // DocString: cb_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({}^3)",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left({}^3\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: cb_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: cb_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::CB;}
+    inline NODE_TYPE type() const {return NODE_TYPE::CB;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "cb";}
+    inline std::string get_postfix_term() const {return "cb";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -192,7 +192,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(({:.10e}*{}{:+15.10e})^3)",
@@ -209,7 +209,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\left({:.3e}{}{:+8.3e}\\right)^3\\right)",
@@ -227,7 +227,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -235,7 +235,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 3.0 * std::pow(params[0] * vp + params[1], 2.0);});
@@ -252,6 +252,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateCbNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateCbNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
index 9e0e587278f6e2711ed93d857d11d38ff01d482c..3da7e1c7831640a46b8380dca0b8935554929cce 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
@@ -2,7 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CbParamNode)
 
-void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateCbParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<CbParamNode>(feat, feat_ind, optimizer);
@@ -31,7 +37,13 @@ void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsign
 CbParamNode::CbParamNode()
 {}
 
-CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+CbParamNode::CbParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -51,7 +63,7 @@ CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, double l_bou
     }
 }
 
-CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+CbParamNode::CbParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -60,7 +72,7 @@ CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_
     set_value();
 }
 
-CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+CbParamNode::CbParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -75,7 +87,7 @@ void CbParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void CbNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void CbNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -103,7 +115,7 @@ void CbNode::set_value(const double* params, int offset, bool for_comp, int dept
     allowed_op_funcs::cb(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
 }
 
-void CbNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void CbNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -126,7 +138,7 @@ void CbNode::set_test_value(const double* params, int offset, bool for_comp, int
     );
 }
 
-void CbNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void CbNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -138,3 +150,19 @@ void CbNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void CbParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
index 58f50e530d0d5d1a79488bd2a41cfaec0d053791..1b46092348755264830e8f48ef7852386578b864 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    CbParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    CbParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    CbParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    CbParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    CbParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    CbParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: cb_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: cb_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: cb_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: cb_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp
index 778be205c669dedd2af90dbc94ef445fade8d429..1c78350139f68662841655774f27b374fe8744a3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp>
 
-void generateCbrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateCbrtNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // Prevent possible repeats by combining other power operations together
@@ -27,11 +33,11 @@ void generateCbrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned
 CbrtNode::CbrtNode()
 {}
 
-CbrtNode::CbrtNode(node_ptr feat, unsigned long int feat_ind):
+CbrtNode::CbrtNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-CbrtNode::CbrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+CbrtNode::CbrtNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // Prevent possible repeats by combining other power operations together
@@ -55,7 +61,7 @@ CbrtNode::CbrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, do
     }
 }
 
-void CbrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void CbrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -70,12 +76,12 @@ void CbrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void CbrtNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void CbrtNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact / 3.0, expected_abs_tot);
 }
 
-void CbrtNode::set_value(int offset, bool for_comp)
+void CbrtNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -92,7 +98,7 @@ void CbrtNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::cbrt(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void CbrtNode::set_test_value(int offset, bool for_comp)
+void CbrtNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::cbrt(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
index a65548293a4c482d7bbcbc465fed0ade2bad9039..f88e1631c5ccabc2149b2558b09ec618f0d570fa 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    CbrtNode(node_ptr feat, unsigned long int feat_ind);
+    CbrtNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    CbrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    CbrtNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: cbrt_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit()^(1.0 / 3.0);}
+    inline Unit unit() const {return _feats[0]->unit()^(1.0 / 3.0);}
 
     // DocString: cbrt_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "cbrt({})",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\sqrt[3]{{ {} }}\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: cbrt_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: cbrt_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::CBRT;}
+    inline NODE_TYPE type() const {return NODE_TYPE::CBRT;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "cbrt";}
+    inline std::string get_postfix_term() const {return "cbrt";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leavesis valid
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
    #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -191,7 +191,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(cbrt({:.10e}*{}{:+15.10e}))",
@@ -208,7 +208,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\sqrt[3]{{ {:.3e}{}{:+8.3e} }}\\right)",
@@ -226,7 +226,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    virtual void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -234,7 +234,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 1.0 / 3.0 * std::pow(params[0] * vp + params[1], -2.0 / 3.0);});
@@ -251,6 +251,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateCbrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateCbrtNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
index 8d792046a4e41c9ffa6f8f28ac13e8e6915790a9..87de55f6251e134055494b9fe3181aba2aacc8cb 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CbrtParamNode)
 
-void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateCbrtParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<CbrtParamNode>(feat, feat_ind, optimizer);
@@ -31,7 +37,13 @@ void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsi
 CbrtParamNode::CbrtParamNode()
 {}
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+CbrtParamNode::CbrtParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -50,7 +62,7 @@ CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, double l
     }
 }
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+CbrtParamNode::CbrtParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -58,7 +70,7 @@ CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, std::sha
     get_parameters(optimizer);
 }
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+CbrtParamNode::CbrtParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -85,7 +97,7 @@ void CbrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void CbrtNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void CbrtNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -113,7 +125,7 @@ void CbrtNode::set_value(const double* params, int offset, bool for_comp, int de
     allowed_op_funcs::cbrt(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void CbrtNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void CbrtNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -135,7 +147,7 @@ void CbrtNode::set_test_value(const double* params, int offset, bool for_comp, i
     );
 }
 
-void CbrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void CbrtNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -148,7 +160,7 @@ void CbrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2, ub + 2);
 }
 
-void CbrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void CbrtParamNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
@@ -160,3 +172,19 @@ void CbrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void CbrtParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
index 8ddb70137761a3b511dbe2ae16bc387c22a4696f..c22626ca09ca39a5a98f53d5c9f921e0350e960f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
@@ -61,7 +61,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    CbrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    CbrtParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -71,7 +77,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    CbrtParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    CbrtParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -82,7 +88,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    CbrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    CbrtParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: cbrt_param_node_set_value
     /**
@@ -91,7 +97,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: cbrt_param_node_set_test_value
     /**
@@ -100,7 +106,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -111,7 +117,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -122,24 +128,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: cbrt_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: cbrt_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,7 +178,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Converts a feature into a postfix expression (reverse polish notation)
@@ -183,22 +189,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp
index b8c7b93c67a8f6ac197b9d7d23985d7cca313d13..ae621e30c626b46b6daca93e671387337fcb3468 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp>
 
-void generateCosNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateCosNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // If the input feature is united or if the feature is a sine or cosine operation this feature is invalid
@@ -28,11 +34,11 @@ void generateCosNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 CosNode::CosNode()
 {}
 
-CosNode::CosNode(node_ptr feat, unsigned long int feat_ind):
+CosNode::CosNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-CosNode::CosNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+CosNode::CosNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // If the input feature is united or if the feature is a sine or cosine operation this feature is invalid
@@ -54,7 +60,7 @@ CosNode::CosNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     }
 }
 
-void CosNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void CosNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -69,7 +75,7 @@ void CosNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void CosNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void CosNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -84,7 +90,7 @@ void CosNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void CosNode::set_value(int offset, bool for_comp)
+void CosNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -101,7 +107,7 @@ void CosNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::cos(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void CosNode::set_test_value(int offset, bool for_comp)
+void CosNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::cos(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
index 04d9e2ddef943610dac4866ae7f6bb2ea44b8dd4..e7ee85cbc2cb88a79838cd1192123cb3f21ea6da 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    CosNode(node_ptr feat, unsigned long int feat_ind);
+    CosNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    CosNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    CosNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: cos_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return Unit();}
+    inline Unit unit() const {return Unit();}
 
     // DocString: cos_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "cos({})",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\cos{{ {} }}\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: cos_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: cos_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::COS;}
+    inline NODE_TYPE type() const {return NODE_TYPE::COS;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "cos";}
+    inline std::string get_postfix_term() const {return "cos";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -191,7 +191,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(cos({:.10e}*{}{:+15.10e}))",
@@ -208,7 +208,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\cos{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
@@ -226,7 +226,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -234,7 +234,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 * std::sin(params[0] * vp + params[1]);});
@@ -251,6 +251,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateCosNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateCosNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
index 5e752a840df2e93d046d404b667c2dd028470e6e..bf1f45fe2416824469e7f3dbc06cbbb9dda669eb 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CosParamNode)
 
-void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateCosParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
@@ -39,7 +45,13 @@ void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 CosParamNode::CosParamNode()
 {}
 
-CosParamNode::CosParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+CosParamNode::CosParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     CosNode(feat, feat_ind)
 {
     // If a feature is a sine or cosine operator then it does not make sense to create this feature
@@ -64,14 +76,14 @@ CosParamNode::CosParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     }
 }
 
-CosParamNode::CosParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+CosParamNode::CosParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CosNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-CosParamNode::CosParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+CosParamNode::CosParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     CosNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -86,7 +98,7 @@ void CosParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void CosNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void CosNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -114,7 +126,7 @@ void CosNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::cos(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void CosNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void CosNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -137,7 +149,7 @@ void CosNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void CosNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void CosNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[1] = -1.0 * M_PI;
     ub[1] = M_PI;
@@ -149,3 +161,19 @@ void CosNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2);
 }
+
+void CosParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
index cc53a05ee93a2f65ba014e0b27d891a2096173d9..78580abab1283785dfaf4e354914b14bf1b6d038 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    CosParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    CosParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    CosParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    CosParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    CosParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    CosParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: cos_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: cos_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: cos_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: cos_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp
index 8132b96e96d3ea47f62f1278ba6796f6f664a9e1..f4314be4e88d39030ea5cad71e7c7f6b423e6eaf 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp
@@ -1,6 +1,6 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp>
 
-void generateDivNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateDivNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound)
 {
     ++feat_ind;
     // If either input feature is an inverse or if the second feature is a division operation this feature will be a repeat
@@ -54,11 +54,11 @@ void generateDivNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr
 DivNode::DivNode()
 {}
 
-DivNode::DivNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind):
+DivNode::DivNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {}
 
-DivNode::DivNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+DivNode::DivNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     // If either input feature is an inverse or if the second feature is a division operation this feature will be a repeat
@@ -108,7 +108,7 @@ DivNode::DivNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, d
 
 }
 
-void DivNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void DivNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -123,13 +123,13 @@ void DivNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void DivNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void DivNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact, expected_abs_tot);
     _feats[1]->update_div_mult_leaves(div_mult_leaves, -1.0*fact, expected_abs_tot);
 }
 
-void DivNode::set_value(int offset, bool for_comp)
+void DivNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -146,7 +146,7 @@ void DivNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::div(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), _feats[1]->value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void DivNode::set_test_value(int offset, bool for_comp)
+void DivNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::div(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
index 356bc473c112d2d0ba6fd68c7605d2768e18a56a..253174e26a017da23dfef9fe1234c8278e9acd6f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
@@ -46,7 +46,7 @@ public:
      * @param feat_2 shared_ptr of the second feature to operate on (B)
      * @param feat_ind Index of the new feature
      */
-    DivNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind);
+    DivNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -58,19 +58,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    DivNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound);
+    DivNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: div_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit() / _feats[1]->unit();}
+    inline Unit unit() const {return _feats[0]->unit() / _feats[1]->unit();}
 
     // DocString: div_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({} / {})",
@@ -83,7 +83,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\frac{{ {} }}{{ {} }} \\right)",
@@ -99,7 +99,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: div_node_set_test_value
     /**
@@ -108,7 +108,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: div_node_rung
     /**
@@ -116,18 +116,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
+    inline int rung(const int cur_rung=0) const {return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::DIV;}
+    inline NODE_TYPE type() const {return NODE_TYPE::DIV;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "div";}
+    inline std::string get_postfix_term() const {return "div";}
 
     /**
      * @brief Check if the feature will be valid, if it is then set the value
@@ -142,7 +142,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -152,13 +152,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -173,7 +173,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -193,7 +193,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -202,7 +202,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "({} / ({:.10e}*{}{:+15.10e}))",
@@ -220,7 +220,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\frac{{ {} }}{{ {:.3e}*{}{:+8.3e} }} \\right)",
@@ -239,7 +239,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -247,7 +247,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr_1 = _feats[0]->value_ptr(params, 2);
         double* val_ptr_2 = _feats[1]->value_ptr(params, 1);
@@ -272,6 +272,6 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateDivNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateDivNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
index dbd21b8057e5516bdfcc1f6c30d38d161b26d7f2..d646042638adb0b9b25f2d63d5288a1760410673 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
@@ -2,7 +2,15 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(DivParamNode)
 
-void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateDivParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     // If one of the features is an inverse operator or feat_2 is a division operator this feature will be a repeat
@@ -39,7 +47,12 @@ DivParamNode::DivParamNode()
 {}
 
 DivParamNode::DivParamNode(
-    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
 ) :
     DivNode(feat_1, feat_2, feat_ind)
 {
@@ -66,14 +79,14 @@ DivParamNode::DivParamNode(
     }
 }
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+DivParamNode::DivParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     DivNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound) :
+DivParamNode::DivParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     DivNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -88,7 +101,7 @@ void DivParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void DivNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void DivNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -126,7 +139,7 @@ void DivNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::div(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
 
-void DivNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void DivNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -159,7 +172,7 @@ void DivNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void DivNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void DivNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -172,3 +185,20 @@ void DivNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void DivParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[1]->update_postfix(cur_expr, false);
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
index 61a2a09b0e0970db01b39189bee71d76e5ecf079..fbe99494c1259a8b08bc53a465086fc5162c2455 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
@@ -61,7 +61,14 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    DivParamNode(
+        const node_ptr feat_1,
+        const node_ptr feat_2,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -72,7 +79,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    DivParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -83,7 +90,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    DivParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: div_param_node_set_value
     /**
@@ -92,7 +99,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: div_param_node_set_test_value
     /**
@@ -101,7 +108,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -112,7 +119,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -123,24 +130,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: div_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: div_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -174,23 +181,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[1]->update_postfix(cur_expr, false);
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
@@ -206,11 +197,11 @@ public:
  */
 void generateDivParamNode(
     std::vector<node_ptr>& feat_list,
-    node_ptr feat_1,
-    node_ptr feat_2,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
     unsigned long int& feat_ind,
-    double l_bound,
-    double u_bound,
+    const double l_bound,
+    const double u_bound,
     std::shared_ptr<NLOptimizer> optimizer
 );
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp
index 37e71aede9c9b92b6c8e262eebe8309f1e3a145d..37288b1da5c86ee0f75f71279bf5140090a4b00d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp>
 
-void generateExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateExpNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // If the input feature is united an exponential, logarithm, addition or subtraction then this operation is invalid or a repeated feature
@@ -33,11 +39,11 @@ void generateExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 ExpNode::ExpNode()
 {}
 
-ExpNode::ExpNode(node_ptr feat, unsigned long int feat_ind):
+ExpNode::ExpNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-ExpNode::ExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+ExpNode::ExpNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // If the input feature is united an exponential, logarithm, addition or subtraction then this operation is invalid or a repeated feature
@@ -68,7 +74,7 @@ ExpNode::ExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     set_test_value();
 }
 
-void ExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void ExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -83,7 +89,7 @@ void ExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void ExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void ExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -98,7 +104,7 @@ void ExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void ExpNode::set_value(int offset, bool for_comp)
+void ExpNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -115,7 +121,7 @@ void ExpNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::exp(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void ExpNode::set_test_value(int offset, bool for_comp)
+void ExpNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::exp(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
index 31af898713e1746390d0cb6dccb87d48841311e1..2d2b5fc1bd16bde8f738552c49de3934919bd856 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    ExpNode(node_ptr feat, unsigned long int feat_ind);
+    ExpNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    ExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    ExpNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: exp_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return Unit();}
+    inline Unit unit() const {return Unit();}
 
     // DocString: exp_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "exp({})",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\exp{{ {} }}\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: exp_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: exp_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::EXP;}
+    inline NODE_TYPE type() const {return NODE_TYPE::EXP;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "exp";}
+    inline std::string get_postfix_term() const {return "exp";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -191,7 +191,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(exp({:.10e}*{}{:+15.10e}))",
@@ -208,7 +208,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\exp{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
@@ -226,7 +226,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -234,7 +234,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return std::exp(params[0] * vp + params[1]);});
@@ -252,6 +252,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateExpNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
index c614e0daf0649c1be6ed7784ce44af0cd30ea428..282d226c3ad7eedd32bfeb30db76f7083bc0f6aa 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(ExpParamNode)
 
-void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateExpParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
@@ -44,7 +50,13 @@ void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 ExpParamNode::ExpParamNode()
 {}
 
-ExpParamNode::ExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+ExpParamNode::ExpParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     ExpNode(feat, feat_ind)
 {
     // If the input feature is an exponential, addition, subtraction, or logarithm this feature is invalid
@@ -74,14 +86,14 @@ ExpParamNode::ExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     }
 }
 
-ExpParamNode::ExpParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+ExpParamNode::ExpParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     ExpNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-ExpParamNode::ExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+ExpParamNode::ExpParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     ExpNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -96,7 +108,7 @@ void ExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void ExpNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void ExpNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -124,7 +136,7 @@ void ExpNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::exp(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void ExpNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void ExpNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -147,7 +159,7 @@ void ExpNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void ExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void ExpNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     // The parameters of exponentials are dependent on the external shift/scale parameters, but physically relevant
     lb[0] = 0.0;
@@ -162,3 +174,19 @@ void ExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void ExpParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
index beb18b4fb23173bc06171589a71f393d05706163..4cd71ee21bf46db91759c89f94ca47b5aa80391d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    ExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    ExpParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    ExpParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    ExpParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    ExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    ExpParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: exp_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: exp_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: exp_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: exp_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp
index ba50b7053a8b41534898f1caa90769130d39a0b4..36e476ead2495443e7a380aea21f18cdf8577c7a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp>
 
-void generateInvNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateInvNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::INV))
@@ -26,11 +32,11 @@ void generateInvNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 InvNode::InvNode()
 {}
 
-InvNode::InvNode(node_ptr feat, unsigned long int feat_ind):
+InvNode::InvNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-InvNode::InvNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+InvNode::InvNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // If the input feature is an exponential, division or inverse operation this will be a repeat
@@ -53,7 +59,7 @@ InvNode::InvNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
 
 }
 
-void InvNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void InvNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -68,12 +74,12 @@ void InvNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void InvNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void InvNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact * -1.0, expected_abs_tot);
 }
 
-void InvNode::set_value(int offset, bool for_comp)
+void InvNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -90,7 +96,7 @@ void InvNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::inv(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void InvNode::set_test_value(int offset, bool for_comp)
+void InvNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::inv(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
index a564bb4f1742f2f79c00528ca9173da38d2703ab..dba801edbe5a3b4a6644d674b1c9f50167502e13 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
@@ -41,7 +41,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    InvNode(node_ptr feat, unsigned long int feat_ind);
+    InvNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor without checking feature values
@@ -52,19 +52,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    InvNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    InvNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: inv_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit() ^ (-1.0);}
+    inline Unit unit() const {return _feats[0]->unit() ^ (-1.0);}
 
     // DocString: inv_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "(1.0 / {})",
@@ -76,7 +76,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\frac{{1.0}}{{ {} }}\\right)",
@@ -91,7 +91,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: inv_node_set_test_value
     /**
@@ -100,7 +100,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: inv_node_rung
     /**
@@ -108,18 +108,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::INV;}
+    inline NODE_TYPE type() const {return NODE_TYPE::INV;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "inv";}
+    inline std::string get_postfix_term() const {return "inv";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -128,7 +128,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -138,13 +138,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -159,7 +159,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -169,7 +169,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -179,7 +179,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -187,7 +187,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(1.0 / ({:.10e}*{}{:+15.10e}))",
@@ -204,7 +204,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\frac{{1}}{{ {:.3e}{}{:+8.3e} }}\\right)",
@@ -222,7 +222,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -230,7 +230,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 / std::pow(params[0] * vp + params[1], 2.0);});
@@ -247,6 +247,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateInvNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateInvNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
index c2c8430a295bcf8b76c4ff2ded282662a7350bb5..869530544e96b461448bfe02b53db92972ec47fd 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(InvParamNode)
 
-void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateInvParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
@@ -38,7 +44,13 @@ void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 InvParamNode::InvParamNode()
 {}
 
-InvParamNode::InvParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+InvParamNode::InvParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     InvNode(feat, feat_ind)
 {
     // If the input feature is an inverse operator or a division operator this feature will be a repeat
@@ -62,14 +74,14 @@ InvParamNode::InvParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     }
 }
 
-InvParamNode::InvParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+InvParamNode::InvParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     InvNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-InvParamNode::InvParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+InvParamNode::InvParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     InvNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -84,7 +96,7 @@ void InvParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void InvNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void InvNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -112,7 +124,7 @@ void InvNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::inv(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void InvNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void InvNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -129,7 +141,7 @@ void InvNode::set_test_value(const double* params, int offset, bool for_comp, in
     allowed_op_funcs::inv(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
 }
 
-void InvNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void InvNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -141,3 +153,19 @@ void InvNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void InvParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
index 35f708901bc1450b41f1e21c7f660d8bf8765e87..45595da50f816754b1e6c56ee9db23235110e7d9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    InvParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    InvParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    InvParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    InvParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    InvParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    InvParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: inv_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: inv_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: inv_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: inv_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp
index 4e7848f399acc7fcf8a4ebf0fba9b3637ff5f6c0..a666f6acad7c2d0a7062f3a3e825fbf75dc2a4ea 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp>
 
-void generateLogNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateLogNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // If the input feature is united an exponential, logarithm, multiplication, division or a power operation then this operation is invalid or a repeated feature
@@ -40,11 +46,11 @@ void generateLogNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 LogNode::LogNode()
 {}
 
-LogNode::LogNode(node_ptr feat, unsigned long int feat_ind):
+LogNode::LogNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-LogNode::LogNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+LogNode::LogNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // If the input feature is united an exponential, logarithm, multiplication, division or a power operation then this operation is invalid or a repeated feature
@@ -81,7 +87,7 @@ LogNode::LogNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     set_test_value();
 }
 
-void LogNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void LogNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -96,7 +102,7 @@ void LogNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void LogNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void LogNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -111,7 +117,7 @@ void LogNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void LogNode::set_value(int offset, bool for_comp)
+void LogNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -128,7 +134,7 @@ void LogNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::log(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void LogNode::set_test_value(int offset, bool for_comp)
+void LogNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::log(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
index 25dae4139036e8b63a2d2783344df0e9f7b07e5b..8029277f30361a9e642b1dcc91245267264ce882 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    LogNode(node_ptr feat, unsigned long int feat_ind);
+    LogNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    LogNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    LogNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: log_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return Unit();}
+    inline Unit unit() const {return Unit();}
 
     // DocString: log_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "ln({})",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\ln{{ {} }}\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: log_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: log_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::LOG;}
+    inline NODE_TYPE type() const {return NODE_TYPE::LOG;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "log";}
+    inline std::string get_postfix_term() const {return "log";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -191,7 +191,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(ln({:.10e}*{}{:+15.10e}))",
@@ -208,7 +208,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\ln{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
@@ -226,7 +226,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -234,7 +234,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 1.0 / (params[0] * vp + params[1]);});
@@ -251,6 +251,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateLogNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateLogNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
index 06595cf95a3440657a47e5e33fa60c60dda3906d..b7ea4ad185e79eb96c13c9749521d40b4a709bd2 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(LogParamNode)
 
-void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateLogParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
@@ -50,7 +56,13 @@ void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 LogParamNode::LogParamNode()
 {}
 
-LogParamNode::LogParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+LogParamNode::LogParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     LogNode(feat, feat_ind)
 {
     // If the input feature is an exponential, division, multiplication, logarithm, or power this feature is invalid
@@ -86,14 +98,14 @@ LogParamNode::LogParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     }
 }
 
-LogParamNode::LogParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+LogParamNode::LogParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     LogNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-LogParamNode::LogParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+LogParamNode::LogParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     LogNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -108,7 +120,7 @@ void LogParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void LogNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void LogNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -136,7 +148,7 @@ void LogNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::log(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void LogNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void LogNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -158,7 +170,7 @@ void LogNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void LogNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void LogNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     // The parameters of logarithm are dependent on the external shift/scale parameters, but physically relevant
     *(lb - from_parent + 1) = 0.0;
@@ -171,3 +183,19 @@ void LogNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void LogParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
index 5e48bc2af8179cd0cdfddfdc3dc0736dcd0e3e4b..c41c11f87667c404852e488bbaf9db727e3d6cd8 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    LogParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    LogParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    LogParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    LogParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    LogParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    LogParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: log_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: log_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: log_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: log_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp
index 7caee800d6d5e9e9db6e6e15056a1a159be95494..30cbbfa86256eefd4ce50539939ed6960f00c69a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp
@@ -1,6 +1,6 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp>
 
-void generateMultNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateMultNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound)
 {
     ++feat_ind;
     // If either feature is an inverse or division operation this feature will be a repeat
@@ -58,11 +58,11 @@ void generateMultNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_pt
 MultNode::MultNode()
 {}
 
-MultNode::MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind):
+MultNode::MultNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {}
 
-MultNode::MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+MultNode::MultNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     // If either feature is an inverse or division operation this feature will be a repeat
@@ -115,7 +115,7 @@ MultNode::MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind,
     }
 }
 
-void MultNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void MultNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -130,13 +130,13 @@ void MultNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void MultNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void MultNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact, expected_abs_tot);
     _feats[1]->update_div_mult_leaves(div_mult_leaves, fact, expected_abs_tot);
 }
 
-void MultNode::set_value(int offset, bool for_comp)
+void MultNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -153,7 +153,7 @@ void MultNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::mult(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), _feats[1]->value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void MultNode::set_test_value(int offset, bool for_comp)
+void MultNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::mult(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
index c55b223706eacf4de57233e01727eef339d2ea94..9eafb9bee993bc9205118dc2679c201bc198947b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
@@ -47,7 +47,7 @@ public:
      * @param feat_2 shared_ptr of the second feature to operate on (B)
      * @param feat_ind Index of the new feature
      */
-    MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind);
+    MultNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -59,19 +59,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound);
+    MultNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: mult_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit() * _feats[1]->unit();}
+    inline Unit unit() const {return _feats[0]->unit() * _feats[1]->unit();}
 
     // DocString: mult_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({} * {})",
@@ -84,7 +84,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left({} {}\\right)",
@@ -100,7 +100,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: mult_node_set_test_value
     /**
@@ -109,7 +109,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: mult_node_rung
     /**
@@ -117,18 +117,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
+    inline int rung(const int cur_rung=0) const {return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::MULT;}
+    inline NODE_TYPE type() const {return NODE_TYPE::MULT;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "mult";}
+    inline std::string get_postfix_term() const {return "mult";}
 
     /**
      * @brief Check if the feature will be valid, if it is then set the value
@@ -143,7 +143,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -153,13 +153,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -174,7 +174,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -184,7 +184,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -194,7 +194,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -203,7 +203,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "({} * ({:.10e}*{}{:+15.10e}))",
@@ -221,7 +221,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left({} * \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
@@ -240,7 +240,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -248,7 +248,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp){std::copy_n(_feats[0]->value_ptr(params, 2),  _n_samp, dfdp);}
+    inline void param_derivative(const double* params, double* dfdp) const {std::copy_n(_feats[0]->value_ptr(params, 2),  _n_samp, dfdp);}
     #endif
 };
 
@@ -262,6 +262,6 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateMultNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateMultNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
index cc5a82ee5e26234a0d2754339196653dfd3cd3f4..aa6d0fbd5cb9234300dfff234aff6738f6cfcfa5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
@@ -2,7 +2,15 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(MultParamNode)
 
-void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateMultParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<MultParamNode>(feat_1, feat_2, feat_ind, optimizer);
@@ -32,7 +40,13 @@ void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, no
 MultParamNode::MultParamNode()
 {}
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer):
+MultParamNode::MultParamNode(
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -51,13 +65,13 @@ MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int
     }
 }
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+MultParamNode::MultParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 }
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
+MultParamNode::MultParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -73,7 +87,7 @@ void MultParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void MultNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void MultNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -111,7 +125,7 @@ void MultNode::set_value(const double* params, int offset, bool for_comp, int de
     allowed_op_funcs::mult(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
 
-void MultNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void MultNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -145,7 +159,7 @@ void MultNode::set_test_value(const double* params, int offset, bool for_comp, i
     );
 }
 
-void MultNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void MultNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -158,3 +172,20 @@ void MultNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void MultParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[1]->update_postfix(cur_expr, false);
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
index 78eef1aa2797840c50e0c87209e9c15895deb33d..fc05df89ae142284c85dd064deba1305f619c57b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
@@ -60,7 +60,14 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    MultParamNode(
+        const node_ptr feat_1,
+        const node_ptr feat_2,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +77,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    MultParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +88,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    MultParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: mult_param_node_set_value
     /**
@@ -90,7 +97,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: mult_param_node_set_test_value
     /**
@@ -99,7 +106,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +117,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +128,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: mult_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: mult_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,23 +179,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[1]->update_postfix(cur_expr, false);
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
@@ -204,11 +195,11 @@ public:
  */
 void generateMultParamNode(
     std::vector<node_ptr>& feat_list,
-    node_ptr feat_1,
-    node_ptr feat_2,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
     unsigned long int& feat_ind,
-    double l_bound,
-    double u_bound,
+    const double l_bound,
+    const double u_bound,
     std::shared_ptr<NLOptimizer> optimizer
 );
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp
index ea168550a5f08fb6581bdb89e9d2f39fd80947de..19327f8d3aca384139cd90ef64f532d5769a44dd 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp>
 
-void generateNegExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateNegExpNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // If the input feature is united an exponential, logarithm, addition or subtraction then this operation is invalid or a repeated feature
@@ -34,11 +40,11 @@ void generateNegExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigne
 NegExpNode::NegExpNode()
 {}
 
-NegExpNode::NegExpNode(node_ptr feat, unsigned long int feat_ind):
+NegExpNode::NegExpNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-NegExpNode::NegExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+NegExpNode::NegExpNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // If the input feature is united an exponential, logarithm, addition or subtraction then this operation is invalid or a repeated feature
@@ -67,7 +73,7 @@ NegExpNode::NegExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound
     }
 }
 
-void NegExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void NegExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -82,7 +88,7 @@ void NegExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leave
     ++expected_abs_tot;
 }
 
-void NegExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void NegExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = "exp(" + _feats[0]->expr() + ")";
     if(div_mult_leaves.count(key) > 0)
@@ -97,7 +103,7 @@ void NegExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_
     expected_abs_tot += std::abs(fact);
 }
 
-void NegExpNode::set_value(int offset, bool for_comp)
+void NegExpNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -114,7 +120,7 @@ void NegExpNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::neg_exp(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void NegExpNode::set_test_value(int offset, bool for_comp)
+void NegExpNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::neg_exp(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
index 94565fa0501b4d69165ed7429208eeed1f7e6c47..21ee392817a4ec3c27f02a1b7ed25eceaf694f46 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
@@ -46,7 +46,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    NegExpNode(node_ptr feat, unsigned long int feat_ind);
+    NegExpNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -57,19 +57,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    NegExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    NegExpNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: neg_exp_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return Unit();}
+    inline Unit unit() const {return Unit();}
 
     // DocString: neg_exp_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "(exp(-1.0 * {}))",
@@ -81,7 +81,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\exp{{ \\left(-{} \\right) }} \\right)",
@@ -96,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: neg_exp_node_set_test_value
     /**
@@ -105,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: neg_exp_node_rung
     /**
@@ -113,18 +113,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::NEG_EXP;}
+    inline NODE_TYPE type() const {return NODE_TYPE::NEG_EXP;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "nexp";}
+    inline std::string get_postfix_term() const {return "nexp";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -133,7 +133,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -143,13 +143,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -164,7 +164,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -174,7 +174,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -184,7 +184,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -192,7 +192,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(exp(-1.0 * {:.10e}*{}{:+15.10e}))",
@@ -209,7 +209,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\exp{{ \\left(-\\left({:.3e}{}{:+8.3e} \\right)\\right)}}\\right)",
@@ -227,7 +227,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -235,7 +235,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 * std::exp(-1.0 * (params[0] * vp + params[1]));});
@@ -252,6 +252,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateNegExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateNegExpNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
index f679c72472e2b3abd7064991dcc1b8e700b514bf..f170303e1068044610f7f86c5217c8daa838e6d0 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(NegExpParamNode)
 
-void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateNegExpParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
@@ -44,7 +50,13 @@ void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, un
 NegExpParamNode::NegExpParamNode()
 {}
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+NegExpParamNode::NegExpParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     NegExpNode(feat, feat_ind)
 {
     // If the input feature is an exponential, addition, subtraction, or logarithm this feature is invalid
@@ -74,14 +86,14 @@ NegExpParamNode::NegExpParamNode(node_ptr feat, unsigned long int feat_ind, doub
     }
 }
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+NegExpParamNode::NegExpParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     NegExpNode(feat, feat_ind)
 {
     _params.resize(n_params(),  0.0);
     get_parameters(optimizer);
 }
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+NegExpParamNode::NegExpParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     NegExpNode(feat, feat_ind)
 {
     _params.resize(n_params(),  0.0);
@@ -96,7 +108,7 @@ void NegExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void NegExpNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void NegExpNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -124,7 +136,7 @@ void NegExpNode::set_value(const double* params, int offset, bool for_comp, int
     allowed_op_funcs::neg_exp(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void NegExpNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void NegExpNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -146,7 +158,7 @@ void NegExpNode::set_test_value(const double* params, int offset, bool for_comp,
     );
 }
 
-void NegExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void NegExpNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     // The parameters of exponentials are dependent on the external shift/scale parameters, but physically relevant
     lb[0] = 0.0;
@@ -161,3 +173,19 @@ void NegExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void NegExpParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
index 0b24e557caea22a6f7d46c6beafc5b88eb61ab7c..7fe1651b644117d3bd831b287c86779d7fc8abdd 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    NegExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    NegExpParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    NegExpParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    NegExpParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    NegExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    NegExpParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: neg_exp_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: neg_exp_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: neg_exp_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: neg_exp_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
index c4a3374b596822bc1c896d16807599cc17a6c22e..b9664df52a0fb1cb9bc7c7784b9d415907878f9a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SinParamNode)
 
-void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateSinParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
@@ -39,7 +45,13 @@ void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 SinParamNode::SinParamNode()
 {}
 
-SinParamNode::SinParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+SinParamNode::SinParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     SinNode(feat, feat_ind)
 {
     // If a feature is a sine or cosine operator then it does not make sense to create this feature
@@ -64,14 +76,14 @@ SinParamNode::SinParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     }
 }
 
-SinParamNode::SinParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+SinParamNode::SinParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SinNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-SinParamNode::SinParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+SinParamNode::SinParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     SinNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -86,7 +98,7 @@ void SinParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void SinNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void SinNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -114,7 +126,7 @@ void SinNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::sin(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void SinNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void SinNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -136,7 +148,7 @@ void SinNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void SinNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void SinNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[1] = -1.0 * M_PI;
     ub[1] = M_PI;
@@ -148,3 +160,19 @@ void SinNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void SinParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
index dd1cb23f95adbc0a69017a0675dbfffa1ebc18d3..45bf07a491278611f39b4d6814e1630cabc2706f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SinParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    SinParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SinParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    SinParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SinParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    SinParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: sin_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: sin_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: sin_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: sin_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp
index a91fd44e64fdc085c67b2f8ad6f270cd69b5be1f..bcda54223d337975e8ca67e6b7b6659f5d885626 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp>
 
-void generateSinNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateSinNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // If the input feature is united or if the feature is a sine or cosine operation this feature is invalid
@@ -28,11 +34,11 @@ void generateSinNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 SinNode::SinNode()
 {}
 
-SinNode::SinNode(node_ptr feat, unsigned long int feat_ind):
+SinNode::SinNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-SinNode::SinNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+SinNode::SinNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // If the input feature is united or if the feature is a sine or cosine operation this feature is invalid
@@ -55,7 +61,7 @@ SinNode::SinNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     }
 }
 
-void SinNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void SinNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -70,7 +76,7 @@ void SinNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void SinNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void SinNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -85,7 +91,7 @@ void SinNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void SinNode::set_value(int offset, bool for_comp)
+void SinNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -102,7 +108,7 @@ void SinNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::sin(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void SinNode::set_test_value(int offset, bool for_comp)
+void SinNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::sin(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
index a8c5ca14f9b6f8837467b51748cde237781c3c4a..2e4e3a3df3604bb0ac75b4404d8e32ca72182d40 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
@@ -46,7 +46,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    SinNode(node_ptr feat, unsigned long int feat_ind);
+    SinNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -57,19 +57,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SinNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    SinNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: sin_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return Unit();}
+    inline Unit unit() const {return Unit();}
 
     // DocString: sin_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "sin({})",
@@ -81,7 +81,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\sin{{ {} }}\\right)",
@@ -96,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sin_node_set_test_value
     /**
@@ -105,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sin_node_rung
     /**
@@ -113,18 +113,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::SIN;}
+    inline NODE_TYPE type() const {return NODE_TYPE::SIN;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "sin";}
+    inline std::string get_postfix_term() const {return "sin";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -133,7 +133,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -143,13 +143,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -164,7 +164,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -174,7 +174,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -184,7 +184,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -192,7 +192,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(sin({:.10e}*{}{:+15.10e}))",
@@ -209,7 +209,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\sin{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
@@ -226,7 +226,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -234,7 +234,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return std::cos(params[0] * vp + params[1]);});
@@ -251,6 +251,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateSinNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateSinNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
index 9147a0a421e5a2a701c7b95056dd3b0840817f88..c3de182898c024dec3061081947d3c8bbb3f3006 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SixPowParamNode)
 
-void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateSixPowParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<SixPowParamNode>(feat, feat_ind, optimizer);
@@ -32,7 +38,13 @@ void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, un
 SixPowParamNode::SixPowParamNode()
 {}
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+SixPowParamNode::SixPowParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -51,14 +63,14 @@ SixPowParamNode::SixPowParamNode(node_ptr feat, unsigned long int feat_ind, doub
     }
 }
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+SixPowParamNode::SixPowParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+SixPowParamNode::SixPowParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -73,7 +85,7 @@ void SixPowParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void SixPowNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void SixPowNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -101,7 +113,7 @@ void SixPowNode::set_value(const double* params, int offset, bool for_comp, int
     allowed_op_funcs::sixth_pow(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void SixPowNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void SixPowNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -123,7 +135,7 @@ void SixPowNode::set_test_value(const double* params, int offset, bool for_comp,
     );
 }
 
-void SixPowNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void SixPowNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -135,3 +147,19 @@ void SixPowNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void SixPowParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
index a0a001a299d9f77262d40fad1b04b6df8beb357c..a6dcf36fcf2319650867d6e0610a94ec259444b4 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SixPowParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    SixPowParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SixPowParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    SixPowParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SixPowParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    SixPowParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: six_pow_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: six_pow_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: six_pow_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: six_pow_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp
index a4242e8096482042a473ac1de36de65127797cd4..53bf9805fcd8eb17781d2405d712917f062eeede 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp>
 
-void generateSixPowNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateSixPowNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // Prevent possible repeats by combining other power operations together
@@ -34,11 +40,11 @@ void generateSixPowNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigne
 SixPowNode::SixPowNode()
 {}
 
-SixPowNode::SixPowNode(node_ptr feat, unsigned long int feat_ind):
+SixPowNode::SixPowNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-SixPowNode::SixPowNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+SixPowNode::SixPowNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // Prevent possible repeats by combining other power operations together
@@ -66,7 +72,7 @@ SixPowNode::SixPowNode(node_ptr feat, unsigned long int feat_ind, double l_bound
     }
 }
 
-void SixPowNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void SixPowNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -81,12 +87,12 @@ void SixPowNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leave
     ++expected_abs_tot;
 }
 
-void SixPowNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void SixPowNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact * 6.0, expected_abs_tot);
 }
 
-void SixPowNode::set_value(int offset, bool for_comp)
+void SixPowNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -103,7 +109,7 @@ void SixPowNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::sixth_pow(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void SixPowNode::set_test_value(int offset, bool for_comp)
+void SixPowNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::sixth_pow(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
index 053eab192d0c167062a9526ee4a0bb902b1e1a38..ac8dfdc6acbdea258ab71f8edd6542b794a4ff94 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
@@ -46,7 +46,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    SixPowNode(node_ptr feat, unsigned long int feat_ind);
+    SixPowNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -57,19 +57,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SixPowNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    SixPowNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: six_pow_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit()^(6.0);}
+    inline Unit unit() const {return _feats[0]->unit()^(6.0);}
 
     // DocString: six_pow_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({}^6)",
@@ -81,7 +81,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left({}^6\\right)",
@@ -96,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: six_pow_node_set_test_value
     /**
@@ -105,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: six_pow_node_rung
     /**
@@ -113,18 +113,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::SIX_POW;}
+    inline NODE_TYPE type() const {return NODE_TYPE::SIX_POW;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "sp";}
+    inline std::string get_postfix_term() const {return "sp";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -133,7 +133,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -143,13 +143,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -164,7 +164,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -174,7 +174,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -184,7 +184,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -193,7 +193,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(({:.10e}*{}{:+15.10e})^6)",
@@ -210,7 +210,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\left({:.3e}{}{:+8.3e}\\right)^6\\right)",
@@ -228,7 +228,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -236,7 +236,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 6.0 * std::pow(params[0] * vp + params[1], 5.0);});
@@ -253,6 +253,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateSixPowNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateSixPowNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
index 3ba7515bfabfe8177149a6dd8ed12b42f32132fc..ad56e9db2dac5b1128e763780d10f37613104c10 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SqParamNode)
 
-void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateSqParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<SqParamNode>(feat, feat_ind, optimizer);
@@ -32,7 +38,13 @@ void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsign
 SqParamNode::SqParamNode()
 {}
 
-SqParamNode::SqParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+SqParamNode::SqParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -51,14 +63,14 @@ SqParamNode::SqParamNode(node_ptr feat, unsigned long int feat_ind, double l_bou
     }
 }
 
-SqParamNode::SqParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+SqParamNode::SqParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-SqParamNode::SqParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+SqParamNode::SqParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -73,7 +85,7 @@ void SqParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void SqNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void SqNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -101,7 +113,7 @@ void SqNode::set_value(const double* params, int offset, bool for_comp, int dept
     allowed_op_funcs::sq(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void SqNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void SqNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -123,7 +135,7 @@ void SqNode::set_test_value(const double* params, int offset, bool for_comp, int
     );
 }
 
-void SqNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void SqNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -135,3 +147,19 @@ void SqNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void SqParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
index ffd19790e13bc3e7235971438031294d752212d4..a0251406c53633447d63e821a1bb6d54a10fe874 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
@@ -60,7 +60,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SqParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    SqParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -70,7 +76,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SqParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    SqParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -81,7 +87,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SqParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    SqParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: sq_param_node_set_value
     /**
@@ -90,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: sq_param_node_set_test_value
     /**
@@ -99,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -110,7 +116,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -121,24 +127,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: sq_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: sq_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -172,22 +178,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp
index 8657dac637f33de9e22ae56e49060fd524f75088..e856482d132417da1999a4de558387abf3eefd95 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp>
 
-void generateSqNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateSqNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // Prevent possible repeats by combining other power operations together
@@ -28,11 +34,11 @@ void generateSqNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned lo
 SqNode::SqNode()
 {}
 
-SqNode::SqNode(node_ptr feat, unsigned long int feat_ind):
+SqNode::SqNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-SqNode::SqNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+SqNode::SqNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // Prevent possible repeats by combining other power operations together
@@ -55,7 +61,7 @@ SqNode::SqNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double
 
 }
 
-void SqNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void SqNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -70,12 +76,12 @@ void SqNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, i
     ++expected_abs_tot;
 }
 
-void SqNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void SqNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact * 2.0, expected_abs_tot);
 }
 
-void SqNode::set_value(int offset, bool for_comp)
+void SqNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -92,7 +98,7 @@ void SqNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::sq(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void SqNode::set_test_value(int offset, bool for_comp)
+void SqNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::sq(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
index 580ec958a490f6c6b4f9b896f37fcd0d4fd47f3e..6ff265f8dc59d53ab2504b142de16c9f1684d920 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
@@ -45,7 +45,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    SqNode(node_ptr feat, unsigned long int feat_ind);
+    SqNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -56,19 +56,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SqNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    SqNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: sq_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit()^(2.0);}
+    inline Unit unit() const {return _feats[0]->unit()^(2.0);}
 
     // DocString: sq_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({}^2)",
@@ -80,7 +80,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left({}^2\\right)",
@@ -95,7 +95,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sq_node_set_test_value
     /**
@@ -104,7 +104,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sq_node_rung
     /**
@@ -112,18 +112,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::SQ;}
+    inline NODE_TYPE type() const {return NODE_TYPE::SQ;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "sq";}
+    inline std::string get_postfix_term() const {return "sq";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -132,7 +132,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -142,13 +142,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -163,7 +163,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -173,7 +173,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -183,7 +183,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -192,7 +192,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(({:.10e}*{}{:+15.10e})^2)",
@@ -209,7 +209,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\left({:.3e}{}{:+8.3e}\\right)^2\\right)",
@@ -227,7 +227,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -235,7 +235,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 2.0 * params[0] * vp + params[1];});
@@ -252,6 +252,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateSqNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateSqNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
index f5f1b016b91f292b54148c769bfbfaf444e3f13d..3c280fcb5022ba7f4ecccd51cbfb65fdb3e15ebe 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
@@ -2,7 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SqrtParamNode)
 
-void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateSqrtParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<SqrtParamNode>(feat, feat_ind, optimizer);
@@ -31,7 +37,13 @@ void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsi
 SqrtParamNode::SqrtParamNode()
 {}
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+SqrtParamNode::SqrtParamNode(
+    const node_ptr feat,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -50,7 +62,7 @@ SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, double l
     }
 }
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+SqrtParamNode::SqrtParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -58,7 +70,7 @@ SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, std::sha
     get_parameters(optimizer);
 }
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound) :
+SqrtParamNode::SqrtParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
@@ -85,7 +97,7 @@ void SqrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void SqrtNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void SqrtNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -113,7 +125,7 @@ void SqrtNode::set_value(const double* params, int offset, bool for_comp, int de
     allowed_op_funcs::sqrt(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
-void SqrtNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void SqrtNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
     double* vp_0;
@@ -135,7 +147,7 @@ void SqrtNode::set_test_value(const double* params, int offset, bool for_comp, i
     );
 }
 
-void SqrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void SqrtNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = 1.0;
     ub[0] = 1.0;
@@ -148,7 +160,7 @@ void SqrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
 
-void SqrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void SqrtParamNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
@@ -160,3 +172,19 @@ void SqrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void SqrtParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
index 233135f9d30c942d28dc7988a2e0a555ade325e7..d0174fc72ebff776726eae77b66f141f86a2631f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
@@ -62,7 +62,13 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SqrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    SqrtParamNode(
+        const node_ptr feat,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -72,7 +78,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SqrtParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    SqrtParamNode(const node_ptr feat, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -83,7 +89,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SqrtParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    SqrtParamNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: sqrt_param_node_set_value
     /**
@@ -92,7 +98,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: sqrt_param_node_set_test_value
     /**
@@ -101,7 +107,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -112,7 +118,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -123,24 +129,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: sqrt_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: sqrt_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -173,7 +179,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Converts a feature into a postfix expression (reverse polish notation)
@@ -184,22 +190,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp
index fb1a98022e10b46a577c34a441d58c42d7af114c..b7e1c34c17d19a168f26d62f78d365059d4ba989 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp
@@ -1,6 +1,12 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp>
 
-void generateSqrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateSqrtNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+)
 {
     ++feat_ind;
     // Prevent possible repeats by combining other power operations together
@@ -33,11 +39,11 @@ void generateSqrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned
 SqrtNode::SqrtNode()
 {}
 
-SqrtNode::SqrtNode(node_ptr feat, unsigned long int feat_ind):
+SqrtNode::SqrtNode(const node_ptr feat, const unsigned long int feat_ind):
     OperatorNode({feat}, feat_ind)
 {}
 
-SqrtNode::SqrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
+SqrtNode::SqrtNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat}, feat_ind)
 {
     // Prevent possible repeats by combining other power operations together
@@ -65,7 +71,7 @@ SqrtNode::SqrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, do
 
 }
 
-void SqrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void SqrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
@@ -80,12 +86,12 @@ void SqrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
     ++expected_abs_tot;
 }
 
-void SqrtNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void SqrtNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     _feats[0]->update_div_mult_leaves(div_mult_leaves, fact / 2.0, expected_abs_tot);
 }
 
-void SqrtNode::set_value(int offset, bool for_comp)
+void SqrtNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -102,7 +108,7 @@ void SqrtNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::sqrt(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void SqrtNode::set_test_value(int offset, bool for_comp)
+void SqrtNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::sqrt(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
index 0d862504f22414fdc63e77e406784fc5c4d48462..c4880d43793a208985014f4bea43b076b9203707 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
@@ -46,7 +46,7 @@ public:
      * @param feat shared_ptr of the feature to operate on (A)
      * @param feat_ind Index of the new feature
      */
-    SqrtNode(node_ptr feat, unsigned long int feat_ind);
+    SqrtNode(const node_ptr feat, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -57,19 +57,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SqrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound);
+    SqrtNode(const node_ptr feat, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: sqrt_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit()^(0.5);}
+    inline Unit unit() const {return _feats[0]->unit()^(0.5);}
 
     // DocString: sqrt_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "sqrt({})",
@@ -81,7 +81,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left(\\sqrt{{ {} }}\\right)",
@@ -96,7 +96,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sqrt_node_set_test_value
     /**
@@ -105,7 +105,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sqrt_node_rung
     /**
@@ -113,18 +113,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung=0){return _feats[0]->rung(cur_rung + 1);}
+    inline int rung(const int cur_rung=0) const {return _feats[0]->rung(cur_rung + 1);}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::SQRT;}
+    inline NODE_TYPE type() const {return NODE_TYPE::SQRT;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "sqrt";}
+    inline std::string get_postfix_term() const {return "sqrt";}
 
     /**
      * @brief update the dictionary used to check if an Add/Sub node is valid
@@ -133,7 +133,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -143,13 +143,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -164,7 +164,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -174,7 +174,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -184,7 +184,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -192,7 +192,7 @@ public:
      * @param params parameter values for non-linear operations
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "(sqrt({:.10e}*{}{:+15.10e}))",
@@ -209,7 +209,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left(\\sqrt{{ {:.3e}{}{:+8.3e} }}\\right)",
@@ -227,7 +227,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+    virtual void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -235,7 +235,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp)
+    inline void param_derivative(const double* params, double* dfdp) const
     {
         double* val_ptr = _feats[0]->value_ptr(params);
         std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 0.5 * std::pow(params[0] * vp + params[1], -0.5);});
@@ -252,6 +252,12 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateSqrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateSqrtNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
index fc1ae0870f067b01cf0db202fcae6127eb623794..b8f923916c5a377242c0439c90dc3b8ebf519320 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
@@ -2,7 +2,15 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SubParamNode)
 
-void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateSubParamNode(
+    std::vector<node_ptr>& feat_list,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    unsigned long int& feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<SubParamNode>(feat_1, feat_2, feat_ind, optimizer);
@@ -33,7 +41,12 @@ SubParamNode::SubParamNode()
 {}
 
 SubParamNode::SubParamNode(
-    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+    const node_ptr feat_1,
+    const node_ptr feat_2,
+    const unsigned long int feat_ind,
+    const double l_bound,
+    const double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
 ) :
     SubNode(feat_1, feat_2, feat_ind)
 {
@@ -53,14 +66,14 @@ SubParamNode::SubParamNode(
     }
 }
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+SubParamNode::SubParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 }
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound) :
+SubParamNode::SubParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
@@ -75,7 +88,7 @@ void SubParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
     }
 }
 
-void SubNode::set_value(const double* params, int offset, bool for_comp, int depth)
+void SubNode::set_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     bool is_root = (offset == -1);
     offset += is_root;
@@ -113,7 +126,7 @@ void SubNode::set_value(const double* params, int offset, bool for_comp, int dep
     allowed_op_funcs::sub(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
 
-void SubNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
+void SubNode::set_test_value(const double* params, int offset, const bool for_comp, const int depth) const
 {
     offset += (offset == -1);
 
@@ -146,7 +159,7 @@ void SubNode::set_test_value(const double* params, int offset, bool for_comp, in
     );
 }
 
-void SubNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
+void SubNode::set_bounds(double* lb, double* ub, const int from_parent, const int depth) const
 {
     lb[1] = 0.0;
     ub[1] = 0.0;
@@ -159,3 +172,20 @@ void SubNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
+
+void SubParamNode::update_postfix(std::string& cur_expr, const bool add_params) const
+{
+    std::stringstream postfix;
+    postfix << get_postfix_term();
+    if(add_params)
+    {
+        postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
+        for(int pp = 1; pp < _params.size(); ++pp)
+        {
+            postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+        }
+    }
+    cur_expr = postfix.str() + "|" + cur_expr;
+    _feats[1]->update_postfix(cur_expr, false);
+    _feats[0]->update_postfix(cur_expr, false);
+}
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
index 8d47187ad9b441bbc793f2b648befe3099106525..0c4330ebe199fedf17d3bac3e83a2f03007d5742 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
@@ -61,7 +61,14 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+    SubParamNode(
+        const node_ptr feat_1,
+        const node_ptr feat_2,
+        const unsigned long int feat_ind,
+        const double l_bound,
+        const double u_bound,
+        std::shared_ptr<NLOptimizer> optimizer
+    );
 
     /**
      * @brief Constructor
@@ -72,7 +79,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param optimizer The optimizer to find the optimal parameters for the features
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
+    SubParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -83,7 +90,7 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound=1e-50, double u_bound=1e50);
+    SubParamNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound=1e-50, const double u_bound=1e50);
 
     // DocString: sub_param_node_set_value
     /**
@@ -92,7 +99,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_value(int offset=-1, bool for_comp=false){set_value(_params.data(), offset, for_comp);}
+    inline void set_value(int offset=-1, const bool for_comp=false) const {set_value(_params.data(), offset, for_comp);}
 
     // DocString: sub_param_node_set_test_value
     /**
@@ -101,7 +108,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
+    inline void set_test_value(int offset=-1, const bool for_comp=false) const {set_test_value(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's training data
@@ -112,7 +119,7 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return value_ptr(_params.data(), offset, for_comp);}
+    inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return value_ptr(_params.data(), offset, for_comp);}
 
     /**
      * @brief Get the pointer to the feature's test data
@@ -123,24 +130,24 @@ public:
      *
      * @return pointer to the feature's test values
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return test_value_ptr(_params.data(), offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return test_value_ptr(_params.data(), offset, for_comp);}
 
     // DocString: sub_param_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr(){return expr(_params.data());}
+    inline std::string expr() const {return expr(_params.data());}
 
     // DocString: sub_param_node_get_latex_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr(){return get_latex_expr(_params.data());}
+    inline std::string get_latex_expr() const {return get_latex_expr(_params.data());}
 
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    inline std::vector<double> parameters(){return _params;}
+    inline std::vector<double> parameters() const {return _params;}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -174,23 +181,7 @@ public:
      * @param add_params Add parameters to the expression
      * @return The current postfix expression of the feature
      */
-    void update_postfix(std::string& cur_expr, bool add_params=true)
-    {
-
-        std::stringstream postfix;
-        postfix << get_postfix_term();
-        if(add_params)
-        {
-            postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
-            for(int pp = 1; pp < _params.size(); ++pp)
-            {
-                postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
-            }
-        }
-        cur_expr = postfix.str() + "|" + cur_expr;
-        _feats[1]->update_postfix(cur_expr, false);
-        _feats[0]->update_postfix(cur_expr, false);
-    }
+    void update_postfix(std::string& cur_expr, const bool add_params=true) const;
 };
 
 /**
@@ -206,11 +197,11 @@ public:
  */
 void generateSubParamNode(
     std::vector<node_ptr>& feat_list,
-    node_ptr feat_1,
-    node_ptr feat_2,
+    const node_ptr feat_1,
+    const node_ptr feat_2,
     unsigned long int& feat_ind,
-    double l_bound,
-    double u_bound,
+    const double l_bound,
+    const double u_bound,
     std::shared_ptr<NLOptimizer> optimizer
 );
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp
index 386a2b9fa5d5f337d7a9d3029e69ca8ad1d93532..fcb3d97112f5d80ed838c7da998e22fd4cf1a3dc 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp
@@ -1,6 +1,6 @@
 #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp>
 
-void generateSubNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound)
+void generateSubNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound)
 {
     ++feat_ind;
     // If the input features are not of the same unit this operation is invalid
@@ -52,11 +52,11 @@ void generateSubNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr
 SubNode::SubNode()
 {}
 
-SubNode::SubNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind):
+SubNode::SubNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {}
 
-SubNode::SubNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
+SubNode::SubNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     // If the input features are not of the same unit this operation is invalid
@@ -106,13 +106,13 @@ SubNode::SubNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, d
     }
 }
 
-void SubNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
+void SubNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const
 {
     _feats[0]->update_add_sub_leaves(add_sub_leaves, pl_mn, expected_abs_tot);
     _feats[1]->update_add_sub_leaves(add_sub_leaves, -1*pl_mn, expected_abs_tot);
 }
 
-void SubNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
+void SubNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
@@ -127,7 +127,7 @@ void SubNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
     expected_abs_tot += std::abs(fact);
 }
 
-void SubNode::set_value(int offset, bool for_comp)
+void SubNode::set_value(int offset, const bool for_comp) const
 {
     double* val_ptr;
     if(_selected && (offset == -1))
@@ -144,7 +144,7 @@ void SubNode::set_value(int offset, bool for_comp)
     allowed_op_funcs::sub(_n_samp, _feats[0]->value_ptr(2 * offset, for_comp), _feats[1]->value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, val_ptr);
 }
 
-void SubNode::set_test_value(int offset, bool for_comp)
+void SubNode::set_test_value(int offset, const bool for_comp) const
 {
     offset += (offset == -1);
     allowed_op_funcs::sub(
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
index 80fcf51abe6f704265e624a872825edb3fcbc3cd..3631dc12f82e8a720569ea7d08a568b094e87f2a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
@@ -47,7 +47,7 @@ public:
      * @param feat_2 shared_ptr of the second feature to operate on (B)
      * @param feat_ind Index of the new feature
      */
-    SubNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind);
+    SubNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind);
 
     /**
      * @brief Constructor
@@ -59,19 +59,19 @@ public:
      * @param l_bound Minimum absolute value allowed for the feature.
      * @param u_bound Maximum absolute value allowed for the feature.
      */
-    SubNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound);
+    SubNode(const node_ptr feat_1, const node_ptr feat_2, const unsigned long int feat_ind, const double l_bound, const double u_bound);
 
     // DocString: sub_node_unit
     /**
      * @brief Get the unit of the feature (combine the units of _feats)
      */
-    inline Unit unit(){return _feats[0]->unit();}
+    inline Unit unit() const {return _feats[0]->unit();}
 
     // DocString: sub_node_expr
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string expr()
+    inline std::string expr() const
     {
         return fmt::format(
             "({} - {})",
@@ -84,7 +84,7 @@ public:
     /**
      * @brief Get the expression for the overall feature (From root node down)
      */
-    inline std::string get_latex_expr()
+    inline std::string get_latex_expr() const
     {
         return fmt::format(
             "\\left({} - {}\\right)",
@@ -100,7 +100,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_value(int offset=-1, bool for_comp=false);
+    virtual void set_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sub_node_set_test_value
     /**
@@ -109,7 +109,7 @@ public:
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      * @param for_comp(bool) If true then the evaluation is for comparing features
      */
-    virtual void set_test_value(int offset=-1, bool for_comp=false);
+    virtual void set_test_value(int offset=-1, const bool for_comp=false) const;
 
     // DocString: sub_node_rung
     /**
@@ -117,18 +117,18 @@ public:
      *
      * @param cur_rung The rung current rung of the feature tree (used to recursively calculate rung)
      */
-    inline int rung(int cur_rung = 0){return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
+    inline int rung(const int cur_rung=0) const {return std::max(_feats[0]->rung(cur_rung + 1), _feats[1]->rung(cur_rung + 1));}
 
     /**
      * @brief Returns the type of node this is
      */
-    inline NODE_TYPE type(){return NODE_TYPE::SUB;}
+    inline NODE_TYPE type() const {return NODE_TYPE::SUB;}
 
     /**
      * @brief Get the string character representation of the node for the postfix expression
      * @return the string representation of the node for the postfix expression
      */
-    inline std::string get_postfix_term(){return "sub";}
+    inline std::string get_postfix_term() const {return "sub";}
 
     /**
      * @brief Check if the feature will be valid, if it is then set the value
@@ -143,7 +143,7 @@ public:
      * @param pl_mn if for an addition node: 1 if for a subtraction node: -1
      * @param expected_abs_tot The expected absolute sum of all values in add_sub_leaves
      */
-    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot);
+    void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const;
 
     /**
      * @brief update the dictionary used to check if a mult/div node is valid
@@ -153,13 +153,13 @@ public:
      * @param expected_abs_tot The expected absolute sum of all values in div_mult_leaves
      *
      */
-    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
+    void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const;
 
     #ifdef PARAMETERIZE
     /**
      * @brief The parameters used for introducing more non linearity in the operators
      */
-    virtual std::vector<double> parameters(){return {};}
+    virtual std::vector<double> parameters() const {return {};}
 
     /**
      * @brief Solve the non-linear optimization to set the parameters
@@ -174,7 +174,7 @@ public:
      * @param params The new parameters for the feature
      * @param check_sz if True check the size of the params vector with the expected size
      */
-    virtual void set_parameters(std::vector<double> params, bool check_sz=true){return;}
+    virtual void set_parameters(const std::vector<double> params, const bool check_sz=true){return;}
 
     /**
      * @brief Set the values of the training data for the feature inside of the value storage arrays
@@ -184,7 +184,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief Set the values of the test data for the feature inside of the value storage arrays
@@ -194,7 +194,7 @@ public:
      * @param for_comp(bool) If true then the evaluation is for comparing features
      * @param depth the max depth of paramterization (in the binary expression tree)
      */
-    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+    void set_test_value(const double* params, int offset=-1, const bool for_comp=false, const int depth=1) const;
 
     /**
      * @brief The expression of the feature
@@ -203,7 +203,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return feature expression
      */
-    inline std::string expr(double* params, int depth=1)
+    inline std::string expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "({} - ({:.10e}*{}{:+15.10e}))",
@@ -221,7 +221,7 @@ public:
      * @param depth the current depth of the node on the Binary expression tree
      * @return the latexified expression
      */
-    inline std::string get_latex_expr(double* params, int depth=1)
+    inline std::string get_latex_expr(const double* params, const int depth=1) const
     {
         return fmt::format(
             "\\left({} - \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
@@ -240,7 +240,7 @@ public:
      * @param from_parent How many parameters are between the start of this node's parameters and its parent
      * @param depth the current depth of the node on the Binary expression tree
      */
-    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+    void set_bounds(double* lb, double* ub, const int from_parent=2, const int depth=1) const;
 
     /**
      * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
@@ -248,7 +248,7 @@ public:
      * @param params pointer to the parameters
      * @param dfdp pointer to where the feature derivative pointers are located
      */
-    inline void param_derivative(const double* params, double* dfdp){std::fill_n(dfdp, _n_samp, -1.0);}
+    inline void param_derivative(const double* params, double* dfdp) const {std::fill_n(dfdp, _n_samp, -1.0);}
     #endif
 };
 
@@ -262,6 +262,6 @@ public:
  * @param l_bound lower bound for the maximum feature value
  * @param u_bound upper bound for the maximum feature value
  */
-void generateSubNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
+void generateSubNode(std::vector<node_ptr>& feat_list, const node_ptr feat_1, const node_ptr feat_2, unsigned long int& feat_ind, const double l_bound, const double u_bound);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_ops.hpp b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
index 3fe24a89b9d2b865ebcecb133bdadd03f3aae160..8676e617d6e0f6dfe75afba0f5146e150d9841a2 100644
--- a/src/feature_creation/node/operator_nodes/allowed_ops.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
@@ -48,12 +48,12 @@
 #include <map>
 #include <iostream>
 
-typedef std::function<void(std::vector<node_ptr>&, node_ptr, unsigned long int&, double, double)> un_op_node_gen;
-typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, unsigned long int&, double, double)> bin_op_node_gen;
+typedef std::function<void(std::vector<node_ptr>&, const node_ptr, unsigned long int&, const double, const double)> un_op_node_gen;
+typedef std::function<void(std::vector<node_ptr>&, const node_ptr, const node_ptr, unsigned long int&, const double, const double)> bin_op_node_gen;
 
 #ifdef PARAMETERIZE
-    typedef std::function<void(std::vector<node_ptr>&, node_ptr, unsigned long int&, double, double, std::shared_ptr<NLOptimizer>)> un_param_op_node_gen;
-    typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, unsigned long int&, double, double, std::shared_ptr<NLOptimizer>)> bin_param_op_node_gen;
+    typedef std::function<void(std::vector<node_ptr>&, const node_ptr, unsigned long int&, const double, const double, const std::shared_ptr<NLOptimizer>)> un_param_op_node_gen;
+    typedef std::function<void(std::vector<node_ptr>&, const node_ptr, const node_ptr, unsigned long int&, const double, const double, const std::shared_ptr<NLOptimizer>)> bin_param_op_node_gen;
 #endif
 
 namespace allowed_op_maps
diff --git a/src/feature_creation/node/operator_nodes/functions.hpp b/src/feature_creation/node/operator_nodes/functions.hpp
index 06f1f64a3d2d33289a6da49f7e663ab716d7e274..43394d5bcc766827a908b418c30db9da9b56ae87 100644
--- a/src/feature_creation/node/operator_nodes/functions.hpp
+++ b/src/feature_creation/node/operator_nodes/functions.hpp
@@ -22,7 +22,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void add(int size, double* in_0, double* in_1, double alpha, double a, double* out)
+    inline void add(const int size, const double* in_0, const double* in_1, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, in_1, out, [&](double in_0, double in_1){return in_0 + (alpha * in_1 + a);});
     }
@@ -37,7 +37,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void sub(int size, double* in_0, double* in_1, double alpha, double a, double* out)
+    inline void sub(const int size, const double* in_0, const double* in_1, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, in_1, out, [&](double in_0, double in_1){return in_0 - (alpha * in_1 + a);});
     }
@@ -52,7 +52,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void abs_diff(int size, double* in_0, double* in_1, double alpha, double a, double* out)
+    inline void abs_diff(const int size, const double* in_0, const double* in_1, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, in_1, out, [&](double in_0, double in_1){return std::abs(in_0 - (alpha * in_1 + a));});
     }
@@ -67,7 +67,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void mult(int size, double* in_0, double* in_1, double alpha, double a, double* out)
+    inline void mult(const int size, const double* in_0, const double* in_1, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, in_1, out, [&](double in_0, double in_1){return in_0 * (alpha * in_1 + a);});
     }
@@ -82,7 +82,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void div(int size, double* in_0, double* in_1, double alpha, double a, double* out)
+    inline void div(const int size, const double* in_0, const double* in_1, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, in_1, out, [&](double in_0, double in_1){return in_0 / (alpha * in_1 + a);});
     }
@@ -96,7 +96,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void exp(int size, double* in_0, double alpha, double a, double* out)
+    inline void exp(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::exp(alpha * in_0 + a);});
     }
@@ -110,7 +110,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void neg_exp(int size, double* in_0, double alpha, double a, double* out)
+    inline void neg_exp(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::exp(-1.0*(alpha * in_0) + a);});
     }
@@ -124,7 +124,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parametere
      * @param out pointer to the output array
      */
-    inline void sq(int size, double* in_0, double alpha, double a, double* out)
+    inline void sq(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::pow(alpha * in_0 + a, 2.0);});
     }
@@ -138,7 +138,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parametere
      * @param out pointer to the output array
      */
-    inline void cb(int size, double* in_0, double alpha, double a, double* out)
+    inline void cb(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::pow(alpha * in_0 + a, 3.0);});
     }
@@ -152,7 +152,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void sixth_pow(int size, double* in_0, double alpha, double a, double* out)
+    inline void sixth_pow(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::pow(alpha * in_0 + a, 6.0);});
     }
@@ -166,7 +166,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void cbrt(int size, double* in_0, double alpha, double a, double* out)
+    inline void cbrt(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::pow(alpha * in_0 + a, 1.0/3.0);});
     }
@@ -180,7 +180,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void sqrt(int size, double* in_0, double alpha, double a, double* out)
+    inline void sqrt(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::sqrt(alpha * in_0 + a);});
     }
@@ -194,7 +194,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void inv(int size, double* in_0, double alpha, double a, double* out)
+    inline void inv(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return 1.0 / (alpha * in_0 + a);});
     }
@@ -208,7 +208,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void log(int size, double* in_0, double alpha, double a, double* out)
+    inline void log(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::log(alpha * in_0 + a);});
     }
@@ -222,7 +222,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void sin(int size, double* in_0, double alpha, double a, double* out)
+    inline void sin(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::sin(alpha * in_0 + a);});
     }
@@ -236,7 +236,7 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void cos(int size, double* in_0, double alpha, double a, double* out)
+    inline void cos(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::cos(alpha * in_0 + a);});
     }
@@ -250,10 +250,10 @@ namespace allowed_op_funcs
      * @param a The feature shift parameter
      * @param out pointer to the output array
      */
-    inline void abs(int size, double* in_0, double alpha, double a, double* out)
+    inline void abs(const int size, const double* in_0, const double alpha, const double a, double* out)
     {
         std::transform(in_0, in_0 + size, out, [&](double in_0){return std::abs(alpha * in_0 + a);});
     }
 };
 
-#endif
\ No newline at end of file
+#endif
diff --git a/src/feature_creation/node/utils.cpp b/src/feature_creation/node/utils.cpp
index f357bbdf74c540e5cb5b57ca3afc0f4d38587b11..c87aa906cae8405fb99adecac58a3611c3adecb3 100644
--- a/src/feature_creation/node/utils.cpp
+++ b/src/feature_creation/node/utils.cpp
@@ -1,7 +1,7 @@
 #include <feature_creation/node/utils.hpp>
 
 #ifdef PARAMETERIZE
-void str2node::set_parameters(node_ptr feat, std::vector<std::string> op_terms)
+void str2node::set_parameters(node_ptr feat, const std::vector<std::string> op_terms)
 {
     std::vector<double> parameters(op_terms.size() - 1, 0.0);
     std::transform(op_terms.begin() + 1, op_terms.end(), parameters.begin(), [](std::string str){return std::stod(str);});
@@ -9,7 +9,7 @@ void str2node::set_parameters(node_ptr feat, std::vector<std::string> op_terms)
 }
 #endif
 
-node_ptr str2node::postfix2node(std::string postfix_expr, const std::vector<node_ptr>& phi_0, unsigned long int& feat_ind)
+node_ptr str2node::postfix2node(const std::string postfix_expr, const std::vector<node_ptr>& phi_0, unsigned long int& feat_ind)
 {
     std::vector<node_ptr> stack;
     std::vector<std::string> postfix_split = str_utils::split_string_trim(postfix_expr, "|");
@@ -324,7 +324,7 @@ node_ptr str2node::postfix2node(std::string postfix_expr, const std::vector<node
     return stack[0];
 }
 
-std::vector<node_ptr> str2node::phi_selected_from_file(std::string filename, std::vector<node_ptr> phi_0)
+std::vector<node_ptr> str2node::phi_selected_from_file(const std::string filename, const std::vector<node_ptr> phi_0)
 {
     node_value_arrs::resize_values_arr(0, phi_0.size(), true);
     node_value_arrs::initialize_d_matrix_arr();
@@ -363,7 +363,7 @@ std::vector<node_ptr> str2node::phi_selected_from_file(std::string filename, std
     return phi_selected;
 }
 
-std::vector<node_ptr> str2node::phi_from_file(std::string filename, std::vector<node_ptr> phi_0)
+std::vector<node_ptr> str2node::phi_from_file(const std::string filename, const std::vector<node_ptr> phi_0)
 {
     node_value_arrs::resize_values_arr(0, phi_0.size(), true);
 
@@ -405,7 +405,7 @@ std::vector<node_ptr> str2node::phi_from_file(std::string filename, std::vector<
     return phi;
 }
 
-std::string node_identifier::feature_type_to_string(NODE_TYPE nt)
+std::string node_identifier::feature_type_to_string(const NODE_TYPE nt)
 {
     if(nt == NODE_TYPE::FEAT)
     {
diff --git a/src/feature_creation/node/utils.hpp b/src/feature_creation/node/utils.hpp
index d08d85f985ac1b9f155caf59d44d75fb3b4af01e..b3c7567d6029eeac275b46854314129e256619d1 100644
--- a/src/feature_creation/node/utils.hpp
+++ b/src/feature_creation/node/utils.hpp
@@ -26,7 +26,7 @@ namespace str2node
  * @param feat_ind The desired feature index
  * @return The feature node described by the postfix expression
  */
-node_ptr postfix2node(std::string postfix_expr, const std::vector<node_ptr>& phi_0, unsigned long int& feat_ind);
+node_ptr postfix2node(const std::string postfix_expr, const std::vector<node_ptr>& phi_0, unsigned long int& feat_ind);
 
 /**
  * @brief Convert a feature_space/selected_features.txt into a phi_selected;
@@ -37,7 +37,7 @@ node_ptr postfix2node(std::string postfix_expr, const std::vector<node_ptr>& phi
  *
  * @return The selected feature set from the file
  */
-std::vector<node_ptr> phi_selected_from_file(std::string filename, std::vector<node_ptr> phi_0);
+std::vector<node_ptr> phi_selected_from_file(const std::string filename, const std::vector<node_ptr> phi_0);
 
 /**
  * @brief Convert a text file into the feature space
@@ -48,7 +48,7 @@ std::vector<node_ptr> phi_selected_from_file(std::string filename, std::vector<n
  *
  * @return The feature set defined from the file
  */
-std::vector<node_ptr> phi_from_file(std::string filename, std::vector<node_ptr> phi_0);
+std::vector<node_ptr> phi_from_file(const std::string filename, const std::vector<node_ptr> phi_0);
 
 #ifdef PARAMETERIZE
 /**
@@ -57,7 +57,7 @@ std::vector<node_ptr> phi_from_file(std::string filename, std::vector<node_ptr>
  * @param feat Features whose parameters need to be set
  * @param op_terms term used to define the object
  */
-void set_parameters(node_ptr feat, std::vector<std::string> op_terms);
+void set_parameters(node_ptr feat, const std::vector<std::string> op_terms);
 #endif
 }
 namespace node_identifier
@@ -68,6 +68,6 @@ namespace node_identifier
  * @param nt node type
  * @return string representation of the node type
  */
-std::string feature_type_to_string(NODE_TYPE nt);
+std::string feature_type_to_string(const NODE_TYPE nt);
 }
 #endif
diff --git a/src/feature_creation/node/value_storage/nodes_value_containers.cpp b/src/feature_creation/node/value_storage/nodes_value_containers.cpp
index 6adcfec0792ba2ce21990f4d98db2749029243f7..ba603c17cf63f5feeea4ed9e652f8fa21d5c9088 100644
--- a/src/feature_creation/node/value_storage/nodes_value_containers.cpp
+++ b/src/feature_creation/node/value_storage/nodes_value_containers.cpp
@@ -22,7 +22,7 @@ std::vector<double> node_value_arrs::TEST_VALUES_ARR;
 std::vector<double> node_value_arrs::TEMP_STORAGE_ARR;
 std::vector<double> node_value_arrs::TEMP_STORAGE_TEST_ARR;
 
-void node_value_arrs::initialize_values_arr(int n_samples, int n_samples_test, int n_primary_feat, int max_rung, bool set_task_sz)
+void node_value_arrs::initialize_values_arr(const int n_samples, const int n_samples_test, const int n_primary_feat, const int max_rung, const bool set_task_sz)
 {
     if(max_rung < 0)
     {
@@ -61,7 +61,7 @@ void node_value_arrs::initialize_values_arr(int n_samples, int n_samples_test, i
     TEMP_STORAGE_TEST_REG = std::vector<int>(MAX_N_THREADS * (N_OP_SLOTS * N_STORE_FEATURES + 1), -1);
 }
 
-void node_value_arrs::initialize_values_arr(std::vector<int> task_sz_train, std::vector<int> task_sz_test, int n_primary_feat, int max_rung)
+void node_value_arrs::initialize_values_arr(const std::vector<int> task_sz_train, const std::vector<int> task_sz_test, const int n_primary_feat, const int max_rung)
 {
     TASK_SZ_TRAIN = task_sz_train;
     TASK_SZ_TEST = task_sz_test;
@@ -75,7 +75,7 @@ void node_value_arrs::initialize_values_arr(std::vector<int> task_sz_train, std:
     );
 }
 
-void node_value_arrs::set_task_sz_train(std::vector<int> task_sz_train)
+void node_value_arrs::set_task_sz_train(const std::vector<int> task_sz_train)
 {
     if(std::accumulate(task_sz_train.begin(), task_sz_train.end(), 0) != N_SAMPLES)
     {
@@ -84,7 +84,7 @@ void node_value_arrs::set_task_sz_train(std::vector<int> task_sz_train)
     TASK_SZ_TRAIN = task_sz_train;
 }
 
-void node_value_arrs::set_task_sz_test(std::vector<int> task_sz_test)
+void node_value_arrs::set_task_sz_test(const std::vector<int> task_sz_test)
 {
     if(std::accumulate(task_sz_test.begin(), task_sz_test.end(), 0) != N_SAMPLES_TEST)
     {
@@ -93,7 +93,7 @@ void node_value_arrs::set_task_sz_test(std::vector<int> task_sz_test)
     TASK_SZ_TEST = task_sz_test;
 }
 
-void node_value_arrs::resize_values_arr(int n_dims, int n_feat, bool use_temp)
+void node_value_arrs::resize_values_arr(const int n_dims, const int n_feat, const bool use_temp)
 {
     if(n_dims > MAX_RUNG)
     {
@@ -139,7 +139,14 @@ void node_value_arrs::resize_values_arr(int n_dims, int n_feat, bool use_temp)
     }
 }
 
-double* node_value_arrs::get_value_ptr(unsigned long int arr_ind, unsigned long int feat_ind, int rung, int offset, bool for_comp, bool modify_reg)
+double* node_value_arrs::get_value_ptr(
+    const unsigned long int arr_ind,
+    const unsigned long int feat_ind,
+    const int rung,
+    const int offset,
+    const bool for_comp,
+    const bool modify_reg
+)
 {
     if(modify_reg && (rung <= N_RUNGS_STORED))
     {
@@ -157,7 +164,14 @@ double* node_value_arrs::get_value_ptr(unsigned long int arr_ind, unsigned long
     );
 }
 
-double* node_value_arrs::get_test_value_ptr(unsigned long int arr_ind, unsigned long int feat_ind, int rung, int offset, bool for_comp, bool modify_reg)
+double* node_value_arrs::get_test_value_ptr(
+    const unsigned long int arr_ind,
+    const unsigned long int feat_ind,
+    const int rung,
+    const int offset,
+    const bool for_comp,
+    const bool modify_reg
+)
 {
     if(modify_reg && (rung <= N_RUNGS_STORED))
     {
@@ -181,7 +195,7 @@ void node_value_arrs::initialize_d_matrix_arr()
     D_MATRIX = std::vector<double>(0);
 }
 
-void node_value_arrs::resize_d_matrix_arr(int n_select)
+void node_value_arrs::resize_d_matrix_arr(const int n_select)
 {
     N_SELECTED += n_select;
     D_MATRIX.resize(N_SELECTED * N_SAMPLES, 0.0);
diff --git a/src/feature_creation/node/value_storage/nodes_value_containers.hpp b/src/feature_creation/node/value_storage/nodes_value_containers.hpp
index 6ae367b5450a62c80201b047359f4b6ea5b3ab22..6f0937bfea1a379482f9c5a0b29407c756c0caae 100644
--- a/src/feature_creation/node/value_storage/nodes_value_containers.hpp
+++ b/src/feature_creation/node/value_storage/nodes_value_containers.hpp
@@ -53,7 +53,7 @@ namespace node_value_arrs
      * @param max_rung Largest rung of a feature
      * @param set_test_task_sz If True reset the task_sz vectors
      */
-    void initialize_values_arr(int n_samples, int n_samples_test, int n_primary_feat, int max_rung, bool et_task_sz);
+    void initialize_values_arr(const int n_samples, const int n_samples_test, const int n_primary_feat, const int max_rung, const bool set_task_sz);
 
     /**
      * @brief Initialize the node value arrays
@@ -64,7 +64,7 @@ namespace node_value_arrs
      * @param n_primary_feat Number of primary features
      * @param max_rung Largest rung of a feature
      */
-    inline void initialize_values_arr(int n_samples, int n_samples_test, int n_primary_feat, int max_rung)
+    inline void initialize_values_arr(const int n_samples, const int n_samples_test, const int n_primary_feat, const int max_rung)
     {
         initialize_values_arr(n_samples, n_samples_test, n_primary_feat, max_rung, true);
     }
@@ -78,7 +78,7 @@ namespace node_value_arrs
      * @param n_primary_feat Number of primary features
      * @param max_rung Largest rung of a feature
      */
-    void initialize_values_arr(std::vector<int> task_sz_train, std::vector<int> task_sz_test, int n_primary_feat, int max_rung);
+    void initialize_values_arr(const std::vector<int> task_sz_train, const std::vector<int> task_sz_test, const int n_primary_feat, const int max_rung);
 
     /**
      * @brief Resize the node value arrays
@@ -88,7 +88,7 @@ namespace node_value_arrs
      * @param n_feat number of features to store
      * @param use_temp If true keep the temporary_storage
      */
-    void resize_values_arr(int n_dims, int n_feat, bool use_temp);
+    void resize_values_arr(const int n_dims, const int n_feat, const bool use_temp);
 
     /**
      * @brief Initialize the descriptor matrix
@@ -103,21 +103,21 @@ namespace node_value_arrs
      *
      * @param n_select Number of features to select
      */
-    void resize_d_matrix_arr(int n_select);
+    void resize_d_matrix_arr(const int n_select);
 
     /**
      * @brief Reset the global TASK_SZ_TRAIN vector
      *
      * @param task_sz_train the new task_sz train
      */
-    void set_task_sz_train(std::vector<int> task_sz_train);
+    void set_task_sz_train(const std::vector<int> task_sz_train);
 
     /**
      * @brief Reset the global TASK_SZ_TEST vector
      *
      * @param task_sz_train the new test_sz train
      */
-    void set_task_sz_test(std::vector<int> task_sz_test);
+    void set_task_sz_test(const std::vector<int> task_sz_test);
 
     /**
      * @brief Get the operator slot associated with a given rung/offset
@@ -128,7 +128,7 @@ namespace node_value_arrs
      *
      * @return The operator slot to use
      */
-    inline int get_op_slot(int rung, int offset, bool for_comp)
+    inline int get_op_slot(const int rung, const int offset, const bool for_comp)
     {
         return std::abs(N_OP_SLOTS / (1 + !for_comp) - static_cast<int>(std::pow(2, MAX_RUNG - rung)) - offset);
     }
@@ -141,7 +141,7 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and op_slot
      */
-    inline int& temp_storage_reg(unsigned long int ind, int op_slot=0)
+    inline int& temp_storage_reg(const unsigned long int ind, const int op_slot=0)
     {
         return TEMP_STORAGE_REG[
             (ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
@@ -156,7 +156,7 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and op_slot
      */
-    inline int& temp_storage_test_reg(unsigned long int ind, int op_slot=0)
+    inline int& temp_storage_test_reg(const unsigned long int ind, const int op_slot=0)
     {
         return TEMP_STORAGE_TEST_REG[
             (ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
@@ -173,7 +173,7 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and offset
      */
-    inline int& temp_storage_reg(unsigned long int ind, int rung, int offset, bool for_comp)
+    inline int& temp_storage_reg(const unsigned long int ind, const int rung, const int offset, const bool for_comp)
     {
         return TEMP_STORAGE_REG[
             (ind % N_STORE_FEATURES) +
@@ -192,7 +192,7 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and offset
      */
-    inline int& temp_storage_test_reg(unsigned long int ind, int rung, int offset, bool for_comp)
+    inline int& temp_storage_test_reg(const unsigned long int ind, const int rung, const int offset, const bool for_comp)
     {
         return TEMP_STORAGE_TEST_REG[
             (ind % N_STORE_FEATURES) +
@@ -208,7 +208,7 @@ namespace node_value_arrs
      *
      * @return pointer to the Node's training data
      */
-    inline double* access_value_arr(unsigned long int feature_ind){return &VALUES_ARR[feature_ind*N_SAMPLES];}
+    inline double* access_value_arr(const unsigned long int feature_ind){return &VALUES_ARR[feature_ind*N_SAMPLES];}
 
     /**
      * @brief Access element of the permanent test data storage array
@@ -217,7 +217,7 @@ namespace node_value_arrs
      *
      * @return pointer to the Node's test data
      */
-    inline double* access_test_value_arr(unsigned long int feature_ind){return &TEST_VALUES_ARR[feature_ind*N_SAMPLES_TEST];}
+    inline double* access_test_value_arr(const unsigned long int feature_ind){return &TEST_VALUES_ARR[feature_ind*N_SAMPLES_TEST];}
 
     /**
      * @brief Access element of temporary storage array for the training data
@@ -226,7 +226,7 @@ namespace node_value_arrs
      *
      * @return pointer to the data stored in the specified slot
      */
-    inline double* access_temp_storage(unsigned long int slot){return &TEMP_STORAGE_ARR[slot*N_SAMPLES];}
+    inline double* access_temp_storage(const unsigned long int slot){return &TEMP_STORAGE_ARR[slot*N_SAMPLES];}
 
     /**
      * @brief Access element of temporary storage array for the test data
@@ -235,7 +235,7 @@ namespace node_value_arrs
      *
      * @return pointer to the data stored in the specified slot
      */
-    inline double* access_temp_storage_test(unsigned long int slot){return &TEMP_STORAGE_TEST_ARR[slot*N_SAMPLES_TEST];}
+    inline double* access_temp_storage_test(const unsigned long int slot){return &TEMP_STORAGE_TEST_ARR[slot*N_SAMPLES_TEST];}
 
     /**
      * @brief Get a Node's value_ptr
@@ -249,7 +249,14 @@ namespace node_value_arrs
      *
      * @return The value pointer
      */
-    double* get_value_ptr(unsigned long int arr_ind, unsigned long int feat_ind, int rung=0, int offset=0, bool for_comp=false, bool modify_reg=true);
+    double* get_value_ptr(
+        const unsigned long int arr_ind,
+        const unsigned long int feat_ind,
+        const int rung=0,
+        const int offset=0,
+        const bool for_comp=false,
+        const bool modify_reg=true
+    );
 
     /**
      * @brief Get a Node's test_value_ptr
@@ -263,7 +270,14 @@ namespace node_value_arrs
      *
      * @return The value pointer
      */
-    double* get_test_value_ptr(unsigned long int arr_ind, unsigned long int feat_ind, int rung=0, int offset=0, bool for_comp=false, bool modify_reg=true);
+    double* get_test_value_ptr(
+        const unsigned long int arr_ind,
+        const unsigned long int feat_ind,
+        const int rung=0,
+        const int offset=0,
+        const bool for_comp=false,
+        const bool modify_reg=true
+    );
 
     /**
      * @brief Get the pointer to a particular selected Node from sis
@@ -271,7 +285,7 @@ namespace node_value_arrs
      * @param ind Index of the data in the descriptor matrix
      * @return The pointer to the descriptor matrix's data
      */
-    inline double* get_d_matrix_ptr(int ind){return &D_MATRIX[ind * N_SAMPLES];}
+    inline double* get_d_matrix_ptr(const int ind){return &D_MATRIX[ind * N_SAMPLES];}
 
     /**
      * @brief Flush the temporary storage register (training data)
diff --git a/src/feature_creation/units/Unit.cpp b/src/feature_creation/units/Unit.cpp
index 9f0df5b78c5dbf90b475ef7cd6c309b27a60dc00..91cf989a49f3a1d220303d878ef2a4e2e10789b4 100644
--- a/src/feature_creation/units/Unit.cpp
+++ b/src/feature_creation/units/Unit.cpp
@@ -3,7 +3,7 @@
 Unit::Unit() :
     _dct()
 {}
-Unit::Unit(std::map<std::string, double> dct) :
+Unit::Unit(const std::map<std::string, double> dct) :
     _dct(dct)
 {}
 
@@ -151,7 +151,7 @@ std::string Unit::toLatexString() const
     return unit_rep.str();
 }
 
-Unit Unit::operator*(Unit unit_2)
+Unit Unit::operator*(const Unit unit_2) const
 {
     std::map<std::string, double> to_out = dct();
     for(auto& el : unit_2.dct())
@@ -168,7 +168,7 @@ Unit Unit::operator*(Unit unit_2)
     return Unit(to_out);
 }
 
-Unit Unit::operator/(Unit unit_2)
+Unit Unit::operator/(const Unit unit_2) const
 {
     std::map<std::string, double> to_out = dct();
     for(auto& el : unit_2.dct())
@@ -186,7 +186,7 @@ Unit Unit::operator/(Unit unit_2)
     return Unit(to_out);
 }
 
-Unit& Unit::operator*=(Unit unit_2)
+Unit& Unit::operator*=(const Unit unit_2)
 {
     for(auto& el : unit_2.dct())
     {
@@ -202,7 +202,7 @@ Unit& Unit::operator*=(Unit unit_2)
     return *this;
 }
 
-Unit& Unit::operator/=(Unit unit_2)
+Unit& Unit::operator/=(const Unit unit_2)
 {
     for(auto& el : unit_2.dct())
     {
@@ -219,7 +219,7 @@ Unit& Unit::operator/=(Unit unit_2)
     return *this;
 }
 
-Unit Unit::operator^(double power)
+Unit Unit::operator^(const double power) const
 {
     std::map<std::string, double> to_out = dct();
     if(power == 0.0)
@@ -234,7 +234,7 @@ Unit Unit::operator^(double power)
     return Unit(to_out);
 }
 
-Unit Unit::inverse()
+Unit Unit::inverse() const
 {
     std::map<std::string, double> to_out = dct();
     for(auto& el : to_out)
@@ -244,7 +244,7 @@ Unit Unit::inverse()
     return Unit(to_out);
 }
 
-bool Unit::equal(Unit unit_2) const
+bool Unit::equal(const Unit unit_2) const
 {
     for(auto& el : unit_2.dct())
     {
diff --git a/src/feature_creation/units/Unit.hpp b/src/feature_creation/units/Unit.hpp
index 599ce14374076701f8d65e6c95db70416ba0391a..f6189df245e1b99ef445082ad801a0771b2d41e7 100644
--- a/src/feature_creation/units/Unit.hpp
+++ b/src/feature_creation/units/Unit.hpp
@@ -58,7 +58,7 @@ public:
      *
      * @param dct dictionary representation of the dictionary
      */
-    Unit(std::map<std::string, double> dct);
+    Unit(const std::map<std::string, double> dct);
 
     // DocString: unit_init_str
     /**
@@ -96,7 +96,7 @@ public:
      * @param unit_2 The second unit to multiply by
      * @return The product of this unit with unit_2
      */
-    Unit operator*(Unit unit_2);
+    Unit operator*(const Unit unit_2) const;
 
     // DocString: unit_div
     /**
@@ -105,7 +105,7 @@ public:
      * @param unit_2 The second unit to divide by
      * @return The quotient of this unit with unit_2
      */
-    Unit operator/(Unit unit_2);
+    Unit operator/(const Unit unit_2) const;
 
     // DocString: unit_mult_assign
     /**
@@ -114,7 +114,7 @@ public:
      * @param unit_2 The second unit to multiply this Unit by
      * @return The product of this unit with unit_2
      */
-    Unit& operator*=(Unit unit_2);
+    Unit& operator*=(const Unit unit_2);
 
     // DocString: unit_div_assign
     /**
@@ -123,7 +123,7 @@ public:
      * @param unit_2 The second unit to divide this Unit by
      * @return The quotient of this unit with unit_2
      */
-    Unit& operator/=(Unit unit_2);
+    Unit& operator/=(const Unit unit_2);
 
     // DocString: unit_pow
     /**
@@ -132,7 +132,7 @@ public:
      * @param power power to exponentiate the unit
      * @return The unit raised to the power
      */
-    Unit operator^(double power);
+    Unit operator^(const double power) const;
 
     // DocString: unit_inverse
     /**
@@ -140,7 +140,7 @@ public:
      *
      * @return The inverse of this unit
      */
-    Unit inverse();
+    Unit inverse() const;
 
     /**
      * @brief Determine if a second unit is equal to this one
@@ -157,7 +157,7 @@ public:
      * @param unit_2 The unit to compare against
      * @return True if unit_2 equals this unit
      */
-    inline bool operator== (Unit unit_2) const {return equal(unit_2);}
+    inline bool operator== (const Unit unit_2) const {return equal(unit_2);}
 
     // DocString: unit_neq
     /**
@@ -166,12 +166,12 @@ public:
      * @param unit_2 The unit to compare against
      * @return False if unit_2 equals this unit
      */
-    inline bool operator!= (Unit unit_2) const {return !equal(unit_2);}
+    inline bool operator!= (const Unit unit_2) const {return !equal(unit_2);}
 
     /**
      * @brief The dictionary
      */
-    inline std::map<std::string, double> dct(){return _dct;}
+    inline std::map<std::string, double> dct() const {return _dct;}
 };
 
 
diff --git a/src/inputs/InputParser.hpp b/src/inputs/InputParser.hpp
index 59ed031cc133c2a7af1555f118b06ea97b5cfede..e0b00ea653297535185df9b987ef8eeac4ab531c 100644
--- a/src/inputs/InputParser.hpp
+++ b/src/inputs/InputParser.hpp
@@ -36,7 +36,7 @@ class InputParser
 {
 public:
 
-    std::vector<std::string> _param_opset; //!< Map of parameterization operator set (set of operators and non-linear parameters used for a non-linear least squares fit to property)
+    const std::vector<std::string> _param_opset; //!< List of parameterization operators (set of operators and non-linear parameters used for a non-linear least squares fit to property)
     std::vector<std::string> _opset; //!< List of all operators for feature combination
     std::vector<double> _prop_train; //!< The values of the property in the training set
     std::vector<double> _prop_test; //!< The values of the property in the test set
@@ -47,12 +47,12 @@ public:
 
     Unit _prop_unit; //!< The Unit for the property
 
-    std::string _filename; //!< Name of the input file
-    std::string _data_file; //!< Name of the data file
-    std::string _prop_key; //!< Key used to find the property column in the data file
+    const std::string _filename; //!< Name of the input file
+    const std::string _data_file; //!< Name of the data file
+    const std::string _prop_key; //!< Key used to find the property column in the data file
     std::string _prop_label; //!< label used to describe the property
-    std::string _task_key; //!< Key used to find the task column in the data file
-    std::string _calc_type; //!< Type of projection operator to use
+    const std::string _task_key; //!< Key used to find the task column in the data file
+    const std::string _calc_type; //!< Type of projection operator to use
 
     std::shared_ptr<FeatureSpace> _feat_space; //!< shared_ptr to the FeatureSpace generated from the data file and the input file
 
@@ -60,17 +60,17 @@ public:
     double _l_bound; //!< Minimum absolute value allowed for the feature.
     double _u_bound; //!< Maximum absolute value allowed for the feature.
 
-    int _n_dim; //!< Number of dimensions to calculate
-    int _max_rung; //!< Maximum rung for the feature creation
-    int _max_store_rung; //!< Total rungs stored in memory
-    int _n_rung_generate; //!< Total number of rungs to generate on the fly
-    int _n_sis_select; //!< //!< Number of features to select for each dimensions
+    const int _n_dim; //!< Number of dimensions to calculate
+    const int _max_rung; //!< Maximum rung for the feature creation
+    const int _max_store_rung; //!< Total rungs stored in memory
+    const int _n_rung_generate; //!< Total number of rungs to generate on the fly
+    const int _n_sis_select; //!< Number of features to select for each dimension
     int _n_samp; //!< //!< Number of samples (training data)
-    int _n_residuals; //!< Number of residuals to pass to the next sis model
-    int _n_models_store; //!< Number of models to store
-    int _max_param_depth; //!< Max depth to parameterize a feature (default=_max_rung)
+    const int _n_residuals; //!< Number of residuals to pass to the next sis model
+    const int _n_models_store; //!< Number of models to store
+    const int _max_param_depth; //!< Max depth to parameterize a feature (default=_max_rung)
 
-    bool _fix_intercept; //!< If true force intercept to be 0.0
+    const bool _fix_intercept; //!< If true force intercept to be 0.0
     /**
      * @brief Constructor
      *
@@ -83,7 +83,7 @@ public:
     /**
      * @brief Accessor function to _feat_space
      */
-    inline std::shared_ptr<FeatureSpace> feat_space(){return _feat_space;}
+    inline std::shared_ptr<FeatureSpace> feat_space() const {return _feat_space;}
 
     /**
      * @brief Generate the feature space from input files and parameters
diff --git a/src/mpi_interface/MPI_Interface.cpp b/src/mpi_interface/MPI_Interface.cpp
index d0c18d4ead03bd59e21b7194a7ccc88811aec362..ad56fdb37702c3a372aa61f3593728eb54388497 100644
--- a/src/mpi_interface/MPI_Interface.cpp
+++ b/src/mpi_interface/MPI_Interface.cpp
@@ -3,7 +3,7 @@
 MPI_Interface::MPI_Interface() : boost::mpi::communicator()
 {}
 
-std::array<int, 2> MPI_Interface::get_start_end_from_list(int sz, int start)
+std::array<int, 2> MPI_Interface::get_start_end_from_list(const int sz, const int start)
 {
     int els_per_rank = sz / size();
     int remaineder = sz % size();
@@ -41,4 +41,4 @@ void mpi_setup::finalize_mpi_env()
 {
     delete env;
     env = 0;
-}
\ No newline at end of file
+}
diff --git a/src/mpi_interface/MPI_Interface.hpp b/src/mpi_interface/MPI_Interface.hpp
index 30116bbed7038db7c4a5c8c173247cd235c92ad2..1fd61694e4bd5f3a016a808199053b63777c04f4 100644
--- a/src/mpi_interface/MPI_Interface.hpp
+++ b/src/mpi_interface/MPI_Interface.hpp
@@ -38,7 +38,7 @@ public:
      *
      * @return     A unique tag to send information between two processes
      */
-    int cantorTagGen(unsigned int procSend, unsigned int procRecv, unsigned int maxOffest, unsigned int offest)
+    int cantorTagGen(const unsigned int procSend, const unsigned int procRecv, const unsigned int maxOffest, const unsigned int offest)
     {
         return (int((procSend + procRecv) * (procSend + procSend +1) / 2) + procRecv) * maxOffest + offest;
     }
@@ -51,7 +51,7 @@ public:
      *
      * @return The start and end indexes of what this rank is responsible for
      */
-    std::array<int, 2> get_start_end_from_list(int sz, int start=0);
+    std::array<int, 2> get_start_end_from_list(const int sz, const int start=0);
 
 };
 
diff --git a/src/mpi_interface/MPI_ops.cpp b/src/mpi_interface/MPI_ops.cpp
index 4adc7290e73e812a48a538453d032b276d93560b..d04260084c45f8dc4f780ee53ba4ca6e77b1a908 100644
--- a/src/mpi_interface/MPI_ops.cpp
+++ b/src/mpi_interface/MPI_ops.cpp
@@ -1,7 +1,7 @@
 #include<mpi_interface/MPI_ops.hpp>
 
 // MPI_Op top_feats;
-std::function<bool(double*, int, double, std::vector<node_sc_pair>&, double)> mpi_reduce_op::IS_VALID;
+std::function<bool(const double*, const int, const double, const std::vector<node_sc_pair>&, const double)> mpi_reduce_op::IS_VALID;
 double mpi_reduce_op::CROSS_COR_MAX;
 int mpi_reduce_op::N_SIS_SELECT;
 
diff --git a/src/mpi_interface/MPI_ops.hpp b/src/mpi_interface/MPI_ops.hpp
index df6f6592aebd7d5b5c06e6e65ad230addfc6d026..7441e263c811ab9f363c674b3a21574f16a3f56c 100644
--- a/src/mpi_interface/MPI_ops.hpp
+++ b/src/mpi_interface/MPI_ops.hpp
@@ -16,7 +16,7 @@
 namespace mpi_reduce_op
 {
     // extern MPI_Op top_feats;
-    extern std::function<bool(double*, int, double, std::vector<node_sc_pair>&, double)> IS_VALID; //!< Function used to calculate the scores for SIS without changing omp environment
+    extern std::function<bool(const double*, const int, const double, const std::vector<node_sc_pair>&, const double)> IS_VALID; //!< Function used to calculate the scores for SIS without changing omp environment
     extern double CROSS_COR_MAX; //!< The maximum cross correlation between features
     extern int N_SIS_SELECT; //!< The number of features to select
 
diff --git a/src/nl_opt/NLOptWrapper.cpp b/src/nl_opt/NLOptWrapper.cpp
index 9d96379e848cb4aa427272404b8021f3f60871e0..9487dcdfc032aefa235f86334ac297a32fd50c46 100644
--- a/src/nl_opt/NLOptWrapper.cpp
+++ b/src/nl_opt/NLOptWrapper.cpp
@@ -3,8 +3,8 @@ int nlopt_wrapper::MAX_PARAM_DEPTH = -1;
 NLOptimizer::NLOptimizer(
     const std::vector<int>& task_sizes,
     const std::vector<double>& prop,
-    int n_rung,
-    nlopt::func objective,
+    const int n_rung,
+    const nlopt::func objective,
     int max_param_depth,
     bool reset_max_param_depth
 ) :
@@ -61,7 +61,7 @@ NLOptimizer::NLOptimizer(
 NLOptimizerClassification::NLOptimizerClassification(
     const std::vector<int>& task_sizes,
     const std::vector<double>& prop,
-    int n_rung,
+    const int n_rung,
     int max_param_depth,
     bool reset_max_param_depth
 ) :
@@ -74,10 +74,10 @@ NLOptimizerClassification::NLOptimizerClassification(
 NLOptimizerRegression::NLOptimizerRegression(
     const std::vector<int>& task_sizes,
     const std::vector<double>& prop,
-    int n_rung,
+    const int n_rung,
     int max_param_depth,
-    double cauchy_scaling,
-    bool log_reg,
+    const double cauchy_scaling,
+    const bool log_reg,
     bool reset_max_param_depth
 ) :
     NLOptimizer(task_sizes, prop, n_rung, log_reg ? nlopt_wrapper::objective_log_reg : nlopt_wrapper::objective_reg, max_param_depth, reset_max_param_depth),
@@ -89,9 +89,9 @@ NLOptimizerRegression::NLOptimizerRegression(
 NLOptimizerLogRegression::NLOptimizerLogRegression(
     const std::vector<int>& task_sizes,
     const std::vector<double>& prop,
-    int n_rung,
+    const int n_rung,
     int max_param_depth,
-    double cauchy_scaling,
+    const double cauchy_scaling,
     bool reset_max_param_depth
 ) :
     NLOptimizerRegression(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, true, reset_max_param_depth)
@@ -432,9 +432,9 @@ std::shared_ptr<NLOptimizer> nlopt_wrapper::get_optimizer(
     std::string project_type,
     const std::vector<int>& task_sizes,
     const std::vector<double>& prop,
-    int n_rung,
+    const int n_rung,
     int max_param_depth,
-    double cauchy_scaling,
+    const double cauchy_scaling,
     bool reset_max_param_depth
 )
 {
diff --git a/src/nl_opt/NLOptWrapper.hpp b/src/nl_opt/NLOptWrapper.hpp
index 2507cd24527f0c5402dbe23819a644fc3b2dc9d3..f288cfdde2390dd2e69b7d76bdf9c772e631ac7d 100644
--- a/src/nl_opt/NLOptWrapper.hpp
+++ b/src/nl_opt/NLOptWrapper.hpp
@@ -14,17 +14,17 @@
 class NLOptimizer
 {
 protected:
-    nlopt::func _objective; //!< Objective function to use
+    const nlopt::func _objective; //!< Objective function to use
 
     std::vector<double> _a; //!< vector to store the A matrix for dgels
-    std::vector<double> _prop; //!< The property to fit the functions against
+    const std::vector<double> _prop; //!< The property to fit the functions against
     std::vector<double> _prop_copy; //!< Copy of the property to keep for dgels
     std::vector<double> _work; //!< work array for dgels
-    std::vector<double> _zeros; //!< array of zeros to fill parameters
-    std::vector<int> _task_sizes; //!< number of samples in each task
+    const std::vector<double> _zeros; //!< array of zeros to fill parameters
+    const std::vector<int> _task_sizes; //!< number of samples in each task
 
-    int _n_samp; //!< total number of samples
-    int _n_rung; //!< Maximum rung of the features
+    const int _n_samp; //!< total number of samples
+    const int _n_rung; //!< Maximum rung of the features
     int _max_params; //!< Maximum number of possible parameters
     int _max_param_depth; //!< parameterize features to all depths of the tree
 
@@ -43,8 +43,8 @@ public:
     NLOptimizer(
         const std::vector<int>& task_sizes,
         const std::vector<double>& prop,
-        int n_rung,
-        nlopt::func objective,
+        const int n_rung,
+        const nlopt::func objective,
         int max_param_depth=-1,
         bool reset_max_param_depth=false
     );
@@ -62,12 +62,12 @@ public:
     /**
      * @brief Accessor function to the task sizes
      */
-    inline std::vector<int>& task_sizes(){return _task_sizes;}
+    inline const std::vector<int>& task_sizes() const {return _task_sizes;}
 
     /**
      * @brief Accessor function to the the property vector
      */
-    inline std::vector<double>& prop(){return _prop;}
+    inline const std::vector<double>& prop() const {return _prop;}
 
     /**
      * @brief Accessor function to the pointer of the work vector's start
@@ -77,32 +77,32 @@ public:
     /**
      * @brief Accessor function to the number of samples
      */
-    inline int n_samp(){return _n_samp;}
+    inline int n_samp() const {return _n_samp;}
 
     /**
      * @brief Accessor function to the maximum number of rungs
      */
-    inline int n_rung(){return _n_rung;}
+    inline int n_rung() const {return _n_rung;}
 
     /**
      * @brief Accessor function to the maximum number of possible parameters
      */
-    inline int max_params(){return _max_params;}
+    inline int max_params() const {return _max_params;}
 
     /**
      * @brief Accessor function to the maximum parameter depth
      */
-    inline int max_param_depth(){return _max_param_depth;}
+    inline int max_param_depth() const {return _max_param_depth;}
 
     /**
      * @brief Accessor function to the default local optimization algorithm
      */
-    inline nlopt::algorithm local_opt_alg (){return _local_opt_alg;}
+    inline nlopt::algorithm local_opt_alg () const {return _local_opt_alg;}
 
     /**
      * @brief Accessor function to the convex hull optimizers (if the projection is a classification problem)
      */
-    virtual std::shared_ptr<ConvexHull1D> convex_hull() = 0;
+    virtual std::shared_ptr<ConvexHull1D> convex_hull() const = 0;
     /**
      * @brief Accessor function to calculate the feature gradient
      */
@@ -114,7 +114,7 @@ public:
     /**
      * @brief Accessor function to cauchy scaling factor for regression problems
      */
-    virtual double cauchy_scaling() = 0;
+    virtual double cauchy_scaling() const = 0;
 };
 
 class NLOptimizerClassification: public NLOptimizer
@@ -134,14 +134,14 @@ public:
     NLOptimizerClassification(
         const std::vector<int>& task_sizes,
         const std::vector<double>& prop,
-        int n_rung,
+        const int n_rung,
         int max_param_depth=-1,
         bool reset_max_param_depth=false
     );
     /**
      * @brief Accessor function to the convex hull optimizers (if the projection is a classification problem)
      */
-    inline std::shared_ptr<ConvexHull1D> convex_hull(){return _convex_hull;}
+    inline std::shared_ptr<ConvexHull1D> convex_hull() const {return _convex_hull;}
 
     /**
      * @brief Accessor function to the feature gradient (always nullptr)
@@ -156,7 +156,7 @@ public:
     /**
      * @brief Accessor function to the cauchy scaling factor (always nullptr)
      */
-    inline double cauchy_scaling(){return 0.0;}
+    inline double cauchy_scaling() const {return 0.0;}
 };
 
 class NLOptimizerRegression: public NLOptimizer
@@ -164,7 +164,7 @@ class NLOptimizerRegression: public NLOptimizer
 protected:
     std::vector<double> _feature_gradient; //!< vector used to calculate the contribution of feature derivatives to the gradient
     std::vector<double> _residuals; //!< storage space for the residuals
-    double _cauchy_scaling; //!< Scaling factor for calculating the cauchy loss function
+    const double _cauchy_scaling; //!< Scaling factor for calculating the cauchy loss function
 public:
     /**
      * @brief Constructor
@@ -180,17 +180,17 @@ public:
     NLOptimizerRegression(
         const std::vector<int>& task_sizes,
         const std::vector<double>& prop,
-        int n_rung,
+        const int n_rung,
         int max_param_depth=-1,
-        double cauchy_scaling=0.5,
-        bool log_reg=false,
+        const double cauchy_scaling=0.5,
+        const bool log_reg=false,
         bool reset_max_param_depth=false
     );
 
     /**
      * @brief Accessor function to the convex hull optimizers (always nullptr)
      */
-    inline std::shared_ptr<ConvexHull1D> convex_hull(){return nullptr;}
+    inline std::shared_ptr<ConvexHull1D> convex_hull() const {return nullptr;}
 
     /**
      * @brief Accessor function to the feature gradient
@@ -205,7 +205,7 @@ public:
     /**
      * @brief Accessor function to the cauchy scaling factor
      */
-    inline double cauchy_scaling(){return _cauchy_scaling;}
+    inline double cauchy_scaling() const {return _cauchy_scaling;}
 };
 
 class NLOptimizerLogRegression: public NLOptimizerRegression
@@ -224,437 +224,437 @@ public:
     NLOptimizerLogRegression(
         const std::vector<int>& task_sizes,
         const std::vector<double>& prop,
-        int n_rung,
+        const int n_rung,
         int max_param_depth=-1,
-        double cauchy_scaling=0.5,
+        const double cauchy_scaling=0.5,
         bool reset_max_param_depth=false
     );
 };
 
 namespace nlopt_wrapper
 {
-    extern int MAX_PARAM_DEPTH; //!< The maximum parameter depth for the problem
+extern int MAX_PARAM_DEPTH; //!< The maximum parameter depth for the problem
 
-    typedef struct
-    {
-        double* _prop; //!< pointer to the property vector
-        Node* _feat; //!< Node pointer of the feature to parameterize
-        NLOptimizer* _optimizer; //!< Data structure to store information for the optimization
-    } feat_data;
+typedef struct
+{
+    const double* _prop; //!< pointer to the property vector
+    Node* _feat; //!< Node pointer of the feature to parameterize
+    NLOptimizer* _optimizer; //!< Data structure to store information for the optimization
+} feat_data;
 
-    /**
-     * @brief The objective function for a classification problem
-     *
-     * @param n The number of parameters for the problem
-     * @param p Pointer to the start of the parameter vector
-     * @param grad Pointer to the start of the gradient calculator
-     * @param data Data structure to describe the problem
-     * @return The minimum value of the objective function
-     */
-    static double objective_class(unsigned int n, const double* p, double* grad, void* data);
+/**
+ * @brief The objective function for a classification problem
+ *
+ * @param n The number of parameters for the problem
+ * @param p Pointer to the start of the parameter vector
+ * @param grad Pointer to the start of the gradient calculator
+ * @param data Data structure to describe the problem
+ * @return The minimum value of the objective function
+ */
+static double objective_class(unsigned int n, const double* p, double* grad, void* data);
 
-    /**
-     * @brief The objective function for a regression problem
-     *
-     * @param n The number of parameters for the problem
-     * @param p Pointer to the start of the parameter vector
-     * @param grad Pointer to the start of the gradient calculator
-     * @param data Data structure to describe the problem
-     * @return The minimum value of the objective function
-     */
-    static double objective_reg(unsigned int n, const double* p, double* grad, void* data);
+/**
+ * @brief The objective function for a regression problem
+ *
+ * @param n The number of parameters for the problem
+ * @param p Pointer to the start of the parameter vector
+ * @param grad Pointer to the start of the gradient calculator
+ * @param data Data structure to describe the problem
+ * @return The minimum value of the objective function
+ */
+static double objective_reg(unsigned int n, const double* p, double* grad, void* data);
 
-    /**
-     * @brief The objective function for a log regression problem
-     *
-     * @param n The number of parameters for the problem
-     * @param p Pointer to the start of the parameter vector
-     * @param grad Pointer to the start of the gradient calculator
-     * @param data Data structure to describe the problem
-     * @return The minimum value of the objective function
-     */
-    static double objective_log_reg(unsigned int n, const double* p, double* grad, void* data);
+/**
+ * @brief The objective function for a log regression problem
+ *
+ * @param n The number of parameters for the problem
+ * @param p Pointer to the start of the parameter vector
+ * @param grad Pointer to the start of the gradient calculator
+ * @param data Data structure to describe the problem
+ * @return The minimum value of the objective function
+ */
+static double objective_log_reg(unsigned int n, const double* p, double* grad, void* data);
 
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param project_type The type of projection operator to optimize the features for
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param cauchy_scaling scaling factor used for the Cauchy loss function
-     * @param reset_max_param_depth If true reset the maximum parameter depth
-     *
-     * @return The correct optimizer
-     */
-    std::shared_ptr<NLOptimizer> get_optimizer(
-        std::string project_type,
-        const std::vector<int>& task_sizes,
-        const std::vector<double>& prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5,
-        bool reset_max_param_depth=false
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param project_type The type of projection operator to optimize the features for
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ * @param reset_max_param_depth If true reset the maximum parameter depth
+ *
+ * @return The correct optimizer
+ */
+std::shared_ptr<NLOptimizer> get_optimizer(
+    std::string project_type,
+    const std::vector<int>& task_sizes,
+    const std::vector<double>& prop,
+    const int n_rung,
+    int max_param_depth=-1,
+    const double cauchy_scaling=0.5,
+    bool reset_max_param_depth=false
+);
+
+#ifdef PY_BINDINGS
+// DocString: nlopt_wrapper_get_reg_optimizer_list_list
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerRegression get_reg_optimizer(
+    py::list task_sizes,
+    py::list prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+    return NLOptimizerRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        false,
+        true
     );
+}
 
-    #ifdef PY_BINDINGS
-    // DocString: nlopt_wrapper_get_reg_optimizer_list_list
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param cauchy_scaling scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerRegression get_reg_optimizer(
-        py::list task_sizes,
-        py::list prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-        return NLOptimizerRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            false,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_reg_optimizer_list_arr
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param cauchy_scaling scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerRegression get_reg_optimizer(
-        py::list task_sizes,
-        np::ndarray prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-        return NLOptimizerRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            false,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_reg_optimizer_arr_list
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param cauchy_scaling scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerRegression get_reg_optimizer(
-        np::ndarray task_sizes,
-        py::list prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-        return NLOptimizerRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            false,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_reg_optimizer_arr_arr
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param cauchy_scaling scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerRegression get_reg_optimizer(
-        np::ndarray task_sizes,
-        np::ndarray prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-        return NLOptimizerRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            false,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_log_reg_optimizer_list_list
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerLogRegression get_log_reg_optimizer(
-        py::list task_sizes,
-        py::list prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-        return NLOptimizerLogRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_log_reg_optimizer_list_arr
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerLogRegression get_log_reg_optimizer(
-        py::list task_sizes,
-        np::ndarray prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-        return NLOptimizerLogRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_log_reg_optimizer_arr_list
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerLogRegression get_log_reg_optimizer(
-        np::ndarray task_sizes,
-        py::list prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-        return NLOptimizerLogRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_log_reg_optimizer_arr_arr
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     * @param scaling factor used for the Cauchy loss function
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerLogRegression get_log_reg_optimizer(
-        np::ndarray task_sizes,
-        np::ndarray prop,
-        int n_rung,
-        int max_param_depth=-1,
-        double cauchy_scaling=0.5
-    )
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-        return NLOptimizerLogRegression(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            cauchy_scaling,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_class_optimizer_list_list
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-        return NLOptimizerClassification(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_class_optimizer_list_arr
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-        return NLOptimizerClassification(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_class_optimizer_arr_list
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-        return NLOptimizerClassification(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            true
-        );
-    }
-
-    // DocString: nlopt_wrapper_get_class_optimizer_arr_arr
-    /**
-     * @brief Get an optimizer for the desired task
-     *
-     * @param task_sizes number of samples in each task
-     * @param prop The property to fit the functions against
-     * @param n_rung Maximum rung of the features
-     * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
-     *
-     * @return The correct optimizer
-     */
-    inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
-    {
-        std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
-        std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-        return NLOptimizerClassification(
-            ts_vec,
-            prop_vec,
-            n_rung,
-            max_param_depth,
-            true
-        );
-    }
-    #endif
+// DocString: nlopt_wrapper_get_reg_optimizer_list_arr
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerRegression get_reg_optimizer(
+    py::list task_sizes,
+    np::ndarray prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+    return NLOptimizerRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        false,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_reg_optimizer_arr_list
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerRegression get_reg_optimizer(
+    np::ndarray task_sizes,
+    py::list prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+    return NLOptimizerRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        false,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_reg_optimizer_arr_arr
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerRegression get_reg_optimizer(
+    np::ndarray task_sizes,
+    np::ndarray prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+    return NLOptimizerRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        false,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_log_reg_optimizer_list_list
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerLogRegression get_log_reg_optimizer(
+    py::list task_sizes,
+    py::list prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+    return NLOptimizerLogRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_log_reg_optimizer_list_arr
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerLogRegression get_log_reg_optimizer(
+    py::list task_sizes,
+    np::ndarray prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+    return NLOptimizerLogRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_log_reg_optimizer_arr_list
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerLogRegression get_log_reg_optimizer(
+    np::ndarray task_sizes,
+    py::list prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+    return NLOptimizerLogRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_log_reg_optimizer_arr_arr
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ * @param cauchy_scaling scaling factor used for the Cauchy loss function
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerLogRegression get_log_reg_optimizer(
+    np::ndarray task_sizes,
+    np::ndarray prop,
+    int n_rung,
+    int max_param_depth=-1,
+    double cauchy_scaling=0.5
+)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+    return NLOptimizerLogRegression(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        cauchy_scaling,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_class_optimizer_list_list
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+    return NLOptimizerClassification(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_class_optimizer_list_arr
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+    return NLOptimizerClassification(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_class_optimizer_arr_list
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+    return NLOptimizerClassification(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        true
+    );
+}
+
+// DocString: nlopt_wrapper_get_class_optimizer_arr_arr
+/**
+ * @brief Get an optimizer for the desired task
+ *
+ * @param task_sizes number of samples in each task
+ * @param prop The property to fit the functions against
+ * @param n_rung Maximum rung of the features
+ * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+ *
+ * @return The correct optimizer
+ */
+inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
+{
+    std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+    std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+    return NLOptimizerClassification(
+        ts_vec,
+        prop_vec,
+        n_rung,
+        max_param_depth,
+        true
+    );
+}
+#endif
 
 }
 
diff --git a/src/python/bindings_docstring_keyed.cpp b/src/python/bindings_docstring_keyed.cpp
index 68679e98a4c64d9a702826bfbe0ff152715a65ad..e6b7c010bd70c324cc6eeb0eae82b3dfb00ecd76 100644
--- a/src/python/bindings_docstring_keyed.cpp
+++ b/src/python/bindings_docstring_keyed.cpp
@@ -237,10 +237,10 @@ void sisso::feature_creation::registerUnit()
 
 void sisso::feature_creation::node::registerFeatureNode()
 {
-    std::string (FeatureNode::*expr_1)() = &FeatureNode::expr;
+    std::string (FeatureNode::*expr_1)() const = &FeatureNode::expr;
     std::string (FeatureNode::*expr_const)() const = &FeatureNode::expr;
-    void (FeatureNode::*set_value_no_param)(int, bool) = &FeatureNode::set_value;
-    void (FeatureNode::*set_test_value_no_param)(int, bool) = &FeatureNode::set_test_value;
+    void (FeatureNode::*set_value_no_param)(int, bool) const = &FeatureNode::set_value;
+    void (FeatureNode::*set_test_value_no_param)(int, bool) const = &FeatureNode::set_test_value;
 
     using namespace boost::python;
     class_<FeatureNode, bases<Node>>("FeatureNode", init<int, std::string, np::ndarray, np::ndarray, Unit>())
@@ -260,7 +260,7 @@ void sisso::feature_creation::node::registerFeatureNode()
 
 void sisso::feature_creation::node::registerModelNode()
 {
-    std::string (ModelNode::*expr_1)() = &ModelNode::expr;
+    std::string (ModelNode::*expr_1)() const = &ModelNode::expr;
     std::string (ModelNode::*expr_const)() const = &ModelNode::expr;
 
     np::ndarray (ModelNode::*eval_many_dict)(py::dict) = &ModelNode::eval_many_py;
@@ -290,9 +290,9 @@ void sisso::feature_creation::node::registerModelNode()
 
 void sisso::feature_creation::node::registerAddNode()
 {
-    void (AddNode::*set_value_no_param)(int, bool) = &AddNode::set_value;
-    void (AddNode::*set_test_value_no_param)(int, bool) = &AddNode::set_test_value;
-    std::string (AddNode::*expr_no_param)() = &AddNode::expr;
+    void (AddNode::*set_value_no_param)(int, bool) const = &AddNode::set_value;
+    void (AddNode::*set_test_value_no_param)(int, bool) const = &AddNode::set_test_value;
+    std::string (AddNode::*expr_no_param)() const = &AddNode::expr;
 
     class_<AddNode, bases<OperatorNode<2>>>("AddNode", init<node_ptr, node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_add_node_set_value@")
@@ -305,9 +305,9 @@ void sisso::feature_creation::node::registerAddNode()
 
 void sisso::feature_creation::node::registerSubNode()
 {
-    void (SubNode::*set_value_no_param)(int, bool) = &SubNode::set_value;
-    void (SubNode::*set_test_value_no_param)(int, bool) = &SubNode::set_test_value;
-    std::string (SubNode::*expr_no_param)() = &SubNode::expr;
+    void (SubNode::*set_value_no_param)(int, bool) const = &SubNode::set_value;
+    void (SubNode::*set_test_value_no_param)(int, bool) const = &SubNode::set_test_value;
+    std::string (SubNode::*expr_no_param)() const = &SubNode::expr;
 
     class_<SubNode, bases<OperatorNode<2>>>("SubNode", init<node_ptr, node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_sub_node_set_value@")
@@ -320,9 +320,9 @@ void sisso::feature_creation::node::registerSubNode()
 
 void sisso::feature_creation::node::registerDivNode()
 {
-    void (DivNode::*set_value_no_param)(int, bool) = &DivNode::set_value;
-    void (DivNode::*set_test_value_no_param)(int, bool) = &DivNode::set_test_value;
-    std::string (DivNode::*expr_no_param)() = &DivNode::expr;
+    void (DivNode::*set_value_no_param)(int, bool) const = &DivNode::set_value;
+    void (DivNode::*set_test_value_no_param)(int, bool) const = &DivNode::set_test_value;
+    std::string (DivNode::*expr_no_param)() const = &DivNode::expr;
 
     class_<DivNode, bases<OperatorNode<2>>>("DivNode", init<node_ptr, node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_div_node_set_value@")
@@ -335,9 +335,9 @@ void sisso::feature_creation::node::registerDivNode()
 
 void sisso::feature_creation::node::registerMultNode()
 {
-    void (MultNode::*set_value_no_param)(int, bool) = &MultNode::set_value;
-    void (MultNode::*set_test_value_no_param)(int, bool) = &MultNode::set_test_value;
-    std::string (MultNode::*expr_no_param)() = &MultNode::expr;
+    void (MultNode::*set_value_no_param)(int, bool) const = &MultNode::set_value;
+    void (MultNode::*set_test_value_no_param)(int, bool) const = &MultNode::set_test_value;
+    std::string (MultNode::*expr_no_param)() const = &MultNode::expr;
 
     class_<MultNode, bases<OperatorNode<2>>>("MultNode", init<node_ptr, node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_mult_node_set_value@")
@@ -350,9 +350,9 @@ void sisso::feature_creation::node::registerMultNode()
 
 void sisso::feature_creation::node::registerAbsDiffNode()
 {
-    void (AbsDiffNode::*set_value_no_param)(int, bool) = &AbsDiffNode::set_value;
-    void (AbsDiffNode::*set_test_value_no_param)(int, bool) = &AbsDiffNode::set_test_value;
-    std::string (AbsDiffNode::*expr_no_param)() = &AbsDiffNode::expr;
+    void (AbsDiffNode::*set_value_no_param)(int, bool) const = &AbsDiffNode::set_value;
+    void (AbsDiffNode::*set_test_value_no_param)(int, bool) const = &AbsDiffNode::set_test_value;
+    std::string (AbsDiffNode::*expr_no_param)() const = &AbsDiffNode::expr;
 
     class_<AbsDiffNode, bases<OperatorNode<2>>>("AbsDiffNode", init<node_ptr, node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_abs_diff_node_set_value@")
@@ -365,9 +365,9 @@ void sisso::feature_creation::node::registerAbsDiffNode()
 
 void sisso::feature_creation::node::registerAbsNode()
 {
-    void (AbsNode::*set_value_no_param)(int, bool) = &AbsNode::set_value;
-    void (AbsNode::*set_test_value_no_param)(int, bool) = &AbsNode::set_test_value;
-    std::string (AbsNode::*expr_no_param)() = &AbsNode::expr;
+    void (AbsNode::*set_value_no_param)(int, bool) const = &AbsNode::set_value;
+    void (AbsNode::*set_test_value_no_param)(int, bool) const = &AbsNode::set_test_value;
+    std::string (AbsNode::*expr_no_param)() const = &AbsNode::expr;
 
     class_<AbsNode, bases<OperatorNode<1>>>("AbsNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_abs_node_set_value@")
@@ -380,9 +380,9 @@ void sisso::feature_creation::node::registerAbsNode()
 
 void sisso::feature_creation::node::registerInvNode()
 {
-    void (InvNode::*set_value_no_param)(int, bool) = &InvNode::set_value;
-    void (InvNode::*set_test_value_no_param)(int, bool) = &InvNode::set_test_value;
-    std::string (InvNode::*expr_no_param)() = &InvNode::expr;
+    void (InvNode::*set_value_no_param)(int, bool) const = &InvNode::set_value;
+    void (InvNode::*set_test_value_no_param)(int, bool) const = &InvNode::set_test_value;
+    std::string (InvNode::*expr_no_param)() const = &InvNode::expr;
 
     class_<InvNode, bases<OperatorNode<1>>>("InvNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_inv_node_set_value@")
@@ -395,9 +395,9 @@ void sisso::feature_creation::node::registerInvNode()
 
 void sisso::feature_creation::node::registerLogNode()
 {
-    void (LogNode::*set_value_no_param)(int, bool) = &LogNode::set_value;
-    void (LogNode::*set_test_value_no_param)(int, bool) = &LogNode::set_test_value;
-    std::string (LogNode::*expr_no_param)() = &LogNode::expr;
+    void (LogNode::*set_value_no_param)(int, bool) const = &LogNode::set_value;
+    void (LogNode::*set_test_value_no_param)(int, bool) const = &LogNode::set_test_value;
+    std::string (LogNode::*expr_no_param)() const = &LogNode::expr;
 
     class_<LogNode, bases<OperatorNode<1>>>("LogNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_log_node_set_value@")
@@ -410,9 +410,9 @@ void sisso::feature_creation::node::registerLogNode()
 
 void sisso::feature_creation::node::registerExpNode()
 {
-    void (ExpNode::*set_value_no_param)(int, bool) = &ExpNode::set_value;
-    void (ExpNode::*set_test_value_no_param)(int, bool) = &ExpNode::set_test_value;
-    std::string (ExpNode::*expr_no_param)() = &ExpNode::expr;
+    void (ExpNode::*set_value_no_param)(int, bool) const = &ExpNode::set_value;
+    void (ExpNode::*set_test_value_no_param)(int, bool) const = &ExpNode::set_test_value;
+    std::string (ExpNode::*expr_no_param)() const = &ExpNode::expr;
 
     class_<ExpNode, bases<OperatorNode<1>>>("ExpNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_exp_node_set_value@")
@@ -425,9 +425,9 @@ void sisso::feature_creation::node::registerExpNode()
 
 void sisso::feature_creation::node::registerNegExpNode()
 {
-    void (NegExpNode::*set_value_no_param)(int, bool) = &NegExpNode::set_value;
-    void (NegExpNode::*set_test_value_no_param)(int, bool) = &NegExpNode::set_test_value;
-    std::string (NegExpNode::*expr_no_param)() = &NegExpNode::expr;
+    void (NegExpNode::*set_value_no_param)(int, bool) const = &NegExpNode::set_value;
+    void (NegExpNode::*set_test_value_no_param)(int, bool) const = &NegExpNode::set_test_value;
+    std::string (NegExpNode::*expr_no_param)() const = &NegExpNode::expr;
 
     class_<NegExpNode, bases<OperatorNode<1>>>("NegExpNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_neg_exp_node_set_value@")
@@ -440,9 +440,9 @@ void sisso::feature_creation::node::registerNegExpNode()
 
 void sisso::feature_creation::node::registerSinNode()
 {
-    void (SinNode::*set_value_no_param)(int, bool) = &SinNode::set_value;
-    void (SinNode::*set_test_value_no_param)(int, bool) = &SinNode::set_test_value;
-    std::string (SinNode::*expr_no_param)() = &SinNode::expr;
+    void (SinNode::*set_value_no_param)(int, bool) const = &SinNode::set_value;
+    void (SinNode::*set_test_value_no_param)(int, bool) const = &SinNode::set_test_value;
+    std::string (SinNode::*expr_no_param)() const = &SinNode::expr;
 
     class_<SinNode, bases<OperatorNode<1>>>("SinNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_sin_node_set_value@")
@@ -455,9 +455,9 @@ void sisso::feature_creation::node::registerSinNode()
 
 void sisso::feature_creation::node::registerCosNode()
 {
-    void (CosNode::*set_value_no_param)(int, bool) = &CosNode::set_value;
-    void (CosNode::*set_test_value_no_param)(int, bool) = &CosNode::set_test_value;
-    std::string (CosNode::*expr_no_param)() = &CosNode::expr;
+    void (CosNode::*set_value_no_param)(int, bool) const = &CosNode::set_value;
+    void (CosNode::*set_test_value_no_param)(int, bool) const = &CosNode::set_test_value;
+    std::string (CosNode::*expr_no_param)() const = &CosNode::expr;
 
     class_<CosNode, bases<OperatorNode<1>>>("CosNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_cos_node_set_value@")
@@ -470,9 +470,9 @@ void sisso::feature_creation::node::registerCosNode()
 
 void sisso::feature_creation::node::registerCbNode()
 {
-    void (CbNode::*set_value_no_param)(int, bool) = &CbNode::set_value;
-    void (CbNode::*set_test_value_no_param)(int, bool) = &CbNode::set_test_value;
-    std::string (CbNode::*expr_no_param)() = &CbNode::expr;
+    void (CbNode::*set_value_no_param)(int, bool) const = &CbNode::set_value;
+    void (CbNode::*set_test_value_no_param)(int, bool) const = &CbNode::set_test_value;
+    std::string (CbNode::*expr_no_param)() const = &CbNode::expr;
 
     class_<CbNode, bases<OperatorNode<1>>>("CbNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_cb_node_set_value@")
@@ -485,9 +485,9 @@ void sisso::feature_creation::node::registerCbNode()
 
 void sisso::feature_creation::node::registerCbrtNode()
 {
-    void (CbrtNode::*set_value_no_param)(int, bool) = &CbrtNode::set_value;
-    void (CbrtNode::*set_test_value_no_param)(int, bool) = &CbrtNode::set_test_value;
-    std::string (CbrtNode::*expr_no_param)() = &CbrtNode::expr;
+    void (CbrtNode::*set_value_no_param)(int, bool) const = &CbrtNode::set_value;
+    void (CbrtNode::*set_test_value_no_param)(int, bool) const = &CbrtNode::set_test_value;
+    std::string (CbrtNode::*expr_no_param)() const = &CbrtNode::expr;
 
     class_<CbrtNode, bases<OperatorNode<1>>>("CbrtNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_cbrt_node_set_value@")
@@ -500,9 +500,9 @@ void sisso::feature_creation::node::registerCbrtNode()
 
 void sisso::feature_creation::node::registerSqNode()
 {
-    void (SqNode::*set_value_no_param)(int, bool) = &SqNode::set_value;
-    void (SqNode::*set_test_value_no_param)(int, bool) = &SqNode::set_test_value;
-    std::string (SqNode::*expr_no_param)() = &SqNode::expr;
+    void (SqNode::*set_value_no_param)(int, bool) const = &SqNode::set_value;
+    void (SqNode::*set_test_value_no_param)(int, bool) const = &SqNode::set_test_value;
+    std::string (SqNode::*expr_no_param)() const = &SqNode::expr;
 
     class_<SqNode, bases<OperatorNode<1>>>("SqNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_sq_node_set_value@")
@@ -515,9 +515,9 @@ void sisso::feature_creation::node::registerSqNode()
 
 void sisso::feature_creation::node::registerSqrtNode()
 {
-    void (SqrtNode::*set_value_no_param)(int, bool) = &SqrtNode::set_value;
-    void (SqrtNode::*set_test_value_no_param)(int, bool) = &SqrtNode::set_test_value;
-    std::string (SqrtNode::*expr_no_param)() = &SqrtNode::expr;
+    void (SqrtNode::*set_value_no_param)(int, bool) const = &SqrtNode::set_value;
+    void (SqrtNode::*set_test_value_no_param)(int, bool) const = &SqrtNode::set_test_value;
+    std::string (SqrtNode::*expr_no_param)() const = &SqrtNode::expr;
 
     class_<SqrtNode, bases<OperatorNode<1>>>("SqrtNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_sqrt_node_set_value@")
@@ -530,9 +530,9 @@ void sisso::feature_creation::node::registerSqrtNode()
 
 void sisso::feature_creation::node::registerSixPowNode()
 {
-    void (SixPowNode::*set_value_no_param)(int, bool) = &SixPowNode::set_value;
-    void (SixPowNode::*set_test_value_no_param)(int, bool) = &SixPowNode::set_test_value;
-    std::string (SixPowNode::*expr_no_param)() = &SixPowNode::expr;
+    void (SixPowNode::*set_value_no_param)(int, bool) const = &SixPowNode::set_value;
+    void (SixPowNode::*set_test_value_no_param)(int, bool) const = &SixPowNode::set_test_value;
+    std::string (SixPowNode::*expr_no_param)() const = &SixPowNode::expr;
 
     class_<SixPowNode, bases<OperatorNode<1>>>("SixPowNode", init<node_ptr, int, double, double>())
         .def("set_value", set_value_no_param, "@DocString_six_pow_node_set_value@")
@@ -545,9 +545,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 #ifdef PARAMETERIZE
     void sisso::feature_creation::node::registerAddParamNode()
     {
-        void (AddParamNode::*set_value_no_param)(int, bool) = &AddParamNode::set_value;
-        void (AddParamNode::*set_test_value_no_param)(int, bool) = &AddParamNode::set_test_value;
-        std::string (AddParamNode::*expr_no_param)() = &AddParamNode::expr;
+        void (AddParamNode::*set_value_no_param)(int, bool) const = &AddParamNode::set_value;
+        void (AddParamNode::*set_test_value_no_param)(int, bool) const = &AddParamNode::set_test_value;
+        std::string (AddParamNode::*expr_no_param)() const = &AddParamNode::expr;
 
         class_<AddParamNode, bases<AddNode>>("AddParamNode", init<node_ptr, node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_add_param_node_set_value@")
@@ -560,9 +560,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerSubParamNode()
     {
-        void (SubParamNode::*set_value_no_param)(int, bool) = &SubParamNode::set_value;
-        void (SubParamNode::*set_test_value_no_param)(int, bool) = &SubParamNode::set_test_value;
-        std::string (SubParamNode::*expr_no_param)() = &SubParamNode::expr;
+        void (SubParamNode::*set_value_no_param)(int, bool) const = &SubParamNode::set_value;
+        void (SubParamNode::*set_test_value_no_param)(int, bool) const = &SubParamNode::set_test_value;
+        std::string (SubParamNode::*expr_no_param)() const = &SubParamNode::expr;
 
         class_<SubParamNode, bases<SubNode>>("SubParamNode", init<node_ptr, node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_sub_param_node_set_value@")
@@ -575,9 +575,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerDivParamNode()
     {
-        void (DivParamNode::*set_value_no_param)(int, bool) = &DivParamNode::set_value;
-        void (DivParamNode::*set_test_value_no_param)(int, bool) = &DivParamNode::set_test_value;
-        std::string (DivParamNode::*expr_no_param)() = &DivParamNode::expr;
+        void (DivParamNode::*set_value_no_param)(int, bool) const = &DivParamNode::set_value;
+        void (DivParamNode::*set_test_value_no_param)(int, bool) const = &DivParamNode::set_test_value;
+        std::string (DivParamNode::*expr_no_param)() const = &DivParamNode::expr;
 
         class_<DivParamNode, bases<DivNode>>("DivParamNode", init<node_ptr, node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_div_param_node_set_value@")
@@ -590,9 +590,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerMultParamNode()
     {
-        void (MultParamNode::*set_value_no_param)(int, bool) = &MultParamNode::set_value;
-        void (MultParamNode::*set_test_value_no_param)(int, bool) = &MultParamNode::set_test_value;
-        std::string (MultParamNode::*expr_no_param)() = &MultParamNode::expr;
+        void (MultParamNode::*set_value_no_param)(int, bool) const = &MultParamNode::set_value;
+        void (MultParamNode::*set_test_value_no_param)(int, bool) const = &MultParamNode::set_test_value;
+        std::string (MultParamNode::*expr_no_param)() const = &MultParamNode::expr;
 
         class_<MultParamNode, bases<MultNode>>("MultParamNode", init<node_ptr, node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_mult_param_node_set_value@")
@@ -605,9 +605,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerAbsDiffParamNode()
     {
-        void (AbsDiffParamNode::*set_value_no_param)(int, bool) = &AbsDiffParamNode::set_value;
-        void (AbsDiffParamNode::*set_test_value_no_param)(int, bool) = &AbsDiffParamNode::set_test_value;
-        std::string (AbsDiffParamNode::*expr_no_param)() = &AbsDiffParamNode::expr;
+        void (AbsDiffParamNode::*set_value_no_param)(int, bool) const = &AbsDiffParamNode::set_value;
+        void (AbsDiffParamNode::*set_test_value_no_param)(int, bool) const = &AbsDiffParamNode::set_test_value;
+        std::string (AbsDiffParamNode::*expr_no_param)() const = &AbsDiffParamNode::expr;
 
         class_<AbsDiffParamNode, bases<AbsDiffNode>>("AbsDiffParamNode", init<node_ptr, node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_abs_diff_param_node_set_value@")
@@ -620,9 +620,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerAbsParamNode()
     {
-        void (AbsParamNode::*set_value_no_param)(int, bool) = &AbsParamNode::set_value;
-        void (AbsParamNode::*set_test_value_no_param)(int, bool) = &AbsParamNode::set_test_value;
-        std::string (AbsParamNode::*expr_no_param)() = &AbsParamNode::expr;
+        void (AbsParamNode::*set_value_no_param)(int, bool) const = &AbsParamNode::set_value;
+        void (AbsParamNode::*set_test_value_no_param)(int, bool) const = &AbsParamNode::set_test_value;
+        std::string (AbsParamNode::*expr_no_param)() const = &AbsParamNode::expr;
 
         class_<AbsParamNode, bases<AbsNode>>("AbsParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_abs_param_node_set_value@")
@@ -635,9 +635,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerInvParamNode()
     {
-        void (InvParamNode::*set_value_no_param)(int, bool) = &InvParamNode::set_value;
-        void (InvParamNode::*set_test_value_no_param)(int, bool) = &InvParamNode::set_test_value;
-        std::string (InvParamNode::*expr_no_param)() = &InvParamNode::expr;
+        void (InvParamNode::*set_value_no_param)(int, bool) const = &InvParamNode::set_value;
+        void (InvParamNode::*set_test_value_no_param)(int, bool) const = &InvParamNode::set_test_value;
+        std::string (InvParamNode::*expr_no_param)() const = &InvParamNode::expr;
 
         class_<InvParamNode, bases<InvNode>>("InvParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_inv_param_node_set_value@")
@@ -650,9 +650,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerLogParamNode()
     {
-        void (LogParamNode::*set_value_no_param)(int, bool) = &LogParamNode::set_value;
-        void (LogParamNode::*set_test_value_no_param)(int, bool) = &LogParamNode::set_test_value;
-        std::string (LogParamNode::*expr_no_param)() = &LogParamNode::expr;
+        void (LogParamNode::*set_value_no_param)(int, bool) const = &LogParamNode::set_value;
+        void (LogParamNode::*set_test_value_no_param)(int, bool) const = &LogParamNode::set_test_value;
+        std::string (LogParamNode::*expr_no_param)() const = &LogParamNode::expr;
 
         class_<LogParamNode, bases<LogNode>>("LogParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_log_param_node_set_value@")
@@ -665,9 +665,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerExpParamNode()
     {
-        void (ExpParamNode::*set_value_no_param)(int, bool) = &ExpParamNode::set_value;
-        void (ExpParamNode::*set_test_value_no_param)(int, bool) = &ExpParamNode::set_test_value;
-        std::string (ExpParamNode::*expr_no_param)() = &ExpParamNode::expr;
+        void (ExpParamNode::*set_value_no_param)(int, bool) const = &ExpParamNode::set_value;
+        void (ExpParamNode::*set_test_value_no_param)(int, bool) const = &ExpParamNode::set_test_value;
+        std::string (ExpParamNode::*expr_no_param)() const = &ExpParamNode::expr;
 
         class_<ExpParamNode, bases<ExpNode>>("ExpParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_exp_param_node_set_value@")
@@ -680,9 +680,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerNegExpParamNode()
     {
-        void (NegExpParamNode::*set_value_no_param)(int, bool) = &NegExpParamNode::set_value;
-        void (NegExpParamNode::*set_test_value_no_param)(int, bool) = &NegExpParamNode::set_test_value;
-        std::string (NegExpParamNode::*expr_no_param)() = &NegExpParamNode::expr;
+        void (NegExpParamNode::*set_value_no_param)(int, bool) const = &NegExpParamNode::set_value;
+        void (NegExpParamNode::*set_test_value_no_param)(int, bool) const = &NegExpParamNode::set_test_value;
+        std::string (NegExpParamNode::*expr_no_param)() const = &NegExpParamNode::expr;
 
         class_<NegExpParamNode, bases<NegExpNode>>("NegExpParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_neg_exp_param_node_set_value@")
@@ -695,9 +695,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerSinParamNode()
     {
-        void (SinParamNode::*set_value_no_param)(int, bool) = &SinParamNode::set_value;
-        void (SinParamNode::*set_test_value_no_param)(int, bool) = &SinParamNode::set_test_value;
-        std::string (SinParamNode::*expr_no_param)() = &SinParamNode::expr;
+        void (SinParamNode::*set_value_no_param)(int, bool) const = &SinParamNode::set_value;
+        void (SinParamNode::*set_test_value_no_param)(int, bool) const = &SinParamNode::set_test_value;
+        std::string (SinParamNode::*expr_no_param)() const = &SinParamNode::expr;
 
         class_<SinParamNode, bases<SinNode>>("SinParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_sin_param_node_set_value@")
@@ -710,9 +710,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerCosParamNode()
     {
-        void (CosParamNode::*set_value_no_param)(int, bool) = &CosParamNode::set_value;
-        void (CosParamNode::*set_test_value_no_param)(int, bool) = &CosParamNode::set_test_value;
-        std::string (CosParamNode::*expr_no_param)() = &CosParamNode::expr;
+        void (CosParamNode::*set_value_no_param)(int, bool) const = &CosParamNode::set_value;
+        void (CosParamNode::*set_test_value_no_param)(int, bool) const = &CosParamNode::set_test_value;
+        std::string (CosParamNode::*expr_no_param)() const = &CosParamNode::expr;
 
         class_<CosParamNode, bases<CosNode>>("CosParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_cos_param_node_set_value@")
@@ -725,9 +725,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerCbParamNode()
     {
-        void (CbParamNode::*set_value_no_param)(int, bool) = &CbParamNode::set_value;
-        void (CbParamNode::*set_test_value_no_param)(int, bool) = &CbParamNode::set_test_value;
-        std::string (CbParamNode::*expr_no_param)() = &CbParamNode::expr;
+        void (CbParamNode::*set_value_no_param)(int, bool) const = &CbParamNode::set_value;
+        void (CbParamNode::*set_test_value_no_param)(int, bool) const = &CbParamNode::set_test_value;
+        std::string (CbParamNode::*expr_no_param)() const = &CbParamNode::expr;
 
         class_<CbParamNode, bases<CbNode>>("CbParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_cb_param_node_set_value@")
@@ -740,9 +740,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerCbrtParamNode()
     {
-        void (CbrtParamNode::*set_value_no_param)(int, bool) = &CbrtParamNode::set_value;
-        void (CbrtParamNode::*set_test_value_no_param)(int, bool) = &CbrtParamNode::set_test_value;
-        std::string (CbrtParamNode::*expr_no_param)() = &CbrtParamNode::expr;
+        void (CbrtParamNode::*set_value_no_param)(int, bool) const = &CbrtParamNode::set_value;
+        void (CbrtParamNode::*set_test_value_no_param)(int, bool) const = &CbrtParamNode::set_test_value;
+        std::string (CbrtParamNode::*expr_no_param)() const = &CbrtParamNode::expr;
 
         class_<CbrtParamNode, bases<CbrtNode>>("CbrtParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_cbrt_param_node_set_value@")
@@ -755,9 +755,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerSqParamNode()
     {
-        void (SqParamNode::*set_value_no_param)(int, bool) = &SqParamNode::set_value;
-        void (SqParamNode::*set_test_value_no_param)(int, bool) = &SqParamNode::set_test_value;
-        std::string (SqParamNode::*expr_no_param)() = &SqParamNode::expr;
+        void (SqParamNode::*set_value_no_param)(int, bool) const = &SqParamNode::set_value;
+        void (SqParamNode::*set_test_value_no_param)(int, bool) const = &SqParamNode::set_test_value;
+        std::string (SqParamNode::*expr_no_param)() const = &SqParamNode::expr;
 
         class_<SqParamNode, bases<SqNode>>("SqParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_sq_param_node_set_value@")
@@ -770,9 +770,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerSqrtParamNode()
     {
-        void (SqrtParamNode::*set_value_no_param)(int, bool) = &SqrtParamNode::set_value;
-        void (SqrtParamNode::*set_test_value_no_param)(int, bool) = &SqrtParamNode::set_test_value;
-        std::string (SqrtParamNode::*expr_no_param)() = &SqrtParamNode::expr;
+        void (SqrtParamNode::*set_value_no_param)(int, bool) const = &SqrtParamNode::set_value;
+        void (SqrtParamNode::*set_test_value_no_param)(int, bool) const = &SqrtParamNode::set_test_value;
+        std::string (SqrtParamNode::*expr_no_param)() const = &SqrtParamNode::expr;
 
         class_<SqrtParamNode, bases<SqrtNode>>("SqrtParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_sqrt_param_node_set_value@")
@@ -785,9 +785,9 @@ void sisso::feature_creation::node::registerSixPowNode()
 
     void sisso::feature_creation::node::registerSixPowParamNode()
     {
-        void (SixPowParamNode::*set_value_no_param)(int, bool) = &SixPowParamNode::set_value;
-        void (SixPowParamNode::*set_test_value_no_param)(int, bool) = &SixPowParamNode::set_test_value;
-        std::string (SixPowParamNode::*expr_no_param)() = &SixPowParamNode::expr;
+        void (SixPowParamNode::*set_value_no_param)(int, bool) const = &SixPowParamNode::set_value;
+        void (SixPowParamNode::*set_test_value_no_param)(int, bool) const = &SixPowParamNode::set_test_value;
+        std::string (SixPowParamNode::*expr_no_param)() const = &SixPowParamNode::expr;
 
         class_<SixPowParamNode, bases<SixPowNode>>("SixPowParamNode", init<node_ptr, int, double, double>())
             .def("set_value", set_value_no_param, "@DocString_six_pow_param_node_set_value@")
diff --git a/src/python/bindings_docstring_keyed.hpp b/src/python/bindings_docstring_keyed.hpp
index 0e5288d1538f8e5ad1e29789df479e9684e59680..c459f2fa7de97ef01c8b99500825fc99f47d06d4 100644
--- a/src/python/bindings_docstring_keyed.hpp
+++ b/src/python/bindings_docstring_keyed.hpp
@@ -31,15 +31,16 @@ namespace sisso
         static void registerFeatureSpace();
         static void registerDomain();
         static void registerUnit();
+        #ifdef PARAMETERIZE
         namespace nloptimizer
         {
             struct NLOptimizerWrap : NLOptimizer, py::wrapper<NLOptimizer>
             {
             public:
-                inline std::shared_ptr<ConvexHull1D> convex_hull(){return this->get_override("convex_hull")();}
+                inline std::shared_ptr<ConvexHull1D> convex_hull()const {return this->get_override("convex_hull")();}
                 inline double* feature_gradient(int ind){return this->get_override("feature_gradient")();}
                 inline double* residuals(int ind){return this->get_override("residuals")();}
-                inline double cauchy_scaling(){return this->get_override("cauchy_scaling")();}
+                inline double cauchy_scaling()const {return this->get_override("cauchy_scaling")();}
             };
             static void registerNLOptimizer();
             static void registerNLOptimizerClassification();
@@ -47,48 +48,51 @@ namespace sisso
             static void registerNLOptimizerLogRegression();
 
         }
+        #endif
         namespace node
         {
+            #ifdef PARAMETERIZE
             /**
              * @brief struct used wrap a Node object for conversion
              */
             struct NodeWrap :  Node, py::wrapper<Node>
             {
             public:
-                inline std::string expr(){return this->get_override("expr")();}
-                inline std::string expr(double*, int depth=1){return this->get_override("expr")();}
-                inline std::string get_latex_expr(){return this->get_override("latex_expr")();}
-                inline Unit unit(){return this->get_override("unit")();}
-                inline std::vector<double> value(){return this->get_override("value")();}
-                inline std::vector<double> test_value(){return this->get_override("test_value")();}
-                inline void set_value(int offset=-1, bool for_comp=false){this->get_override("set_value")();}
-                inline void set_test_value(int offset=-1, bool for_comp=false){this->get_override("set_test_value")();}
-                inline double* value_ptr(int offset=-1, bool for_comp=false){return this->get_override("value_ptr")();}
-                inline double* test_value_ptr(int offset=-1, bool for_comp=false){return this->get_override("test_value_ptr")();}
-                inline void set_value(double* params, int offset=-1, int depth=1){this->get_override("set_value")();}
-                inline void set_test_value(double* params, int offset=-1, int depth=1){this->get_override("set_test_value")();}
-                inline double* value_ptr(double* params, int offset=-1, int depth=1){return this->get_override("value_ptr")();}
-                inline double* test_value_ptr(double* params, int offset=-1, int depth=1){return this->get_override("test_value_ptr")();}
-                inline bool is_nan(){return this->get_override("is_nan")();}
-                inline bool is_const(){return this->get_override("is_const")();}
-                inline NODE_TYPE type(){return this->get_override("type")();}
-                inline int rung(int cur_rung = 0){return this->get_override("rung")();}
-                inline std::map<int, int> primary_feature_decomp(){return this->get_override("primary_feature_decomp")();}
-                inline void update_primary_feature_decomp(std::map<int, int>& pf_decomp){this->get_override("update_primary_feature_decomp")();}
-                inline void update_postfix(std::string& cur_expr){this->get_override("update_postfix")();}
-                inline std::string get_postfix_term(){return this->get_override("get_postfix_term")();}
-                inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot){this->get_override("update_add_sub_leaves")();}
-                inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot){this->get_override("update_div_mult_leaves")();}
-                inline std::vector<double> parameters(){return this->get_override("parameters")();}
+                inline std::string expr() const {return this->get_override("expr")();}
+                inline std::string expr(double*, int depth=1) const {return this->get_override("expr")();}
+                inline std::string get_latex_expr() const {return this->get_override("latex_expr")();}
+                inline std::string get_latex_expr(double*, int depth=1) const {return this->get_override("latex_expr")();}
+                inline Unit unit() const {return this->get_override("unit")();}
+                inline std::vector<double> value() const {return this->get_override("value")();}
+                inline std::vector<double> test_value() const {return this->get_override("test_value")();}
+                inline void set_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_value")();}
+                inline void set_test_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_test_value")();}
+                inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return this->get_override("value_ptr")();}
+                inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return this->get_override("test_value_ptr")();}
+                inline void set_value(double* params, int offset=-1, int depth=1) const {this->get_override("set_value")();}
+                inline void set_test_value(double* params, int offset=-1, int depth=1) const {this->get_override("set_test_value")();}
+                inline double* value_ptr(double* params, int offset=-1, int depth=1) const {return this->get_override("value_ptr")();}
+                inline double* test_value_ptr(double* params, int offset=-1, int depth=1) const {return this->get_override("test_value_ptr")();}
+                inline bool is_nan() const {return this->get_override("is_nan")();}
+                inline bool is_const() const {return this->get_override("is_const")();}
+                inline NODE_TYPE type() const {return this->get_override("type")();}
+                inline int rung(int cur_rung = 0) const {return this->get_override("rung")();}
+                inline std::map<int, int> primary_feature_decomp() const {return this->get_override("primary_feature_decomp")();}
+                inline void update_primary_feature_decomp(std::map<int, int>& pf_decomp) const {this->get_override("update_primary_feature_decomp")();}
+                inline void update_postfix(std::string& cur_expr) const {this->get_override("update_postfix")();}
+                inline std::string get_postfix_term() const {return this->get_override("get_postfix_term")();}
+                inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const {this->get_override("update_add_sub_leaves")();}
+                inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const {this->get_override("update_div_mult_leaves")();}
+                inline std::vector<double> parameters() const {return this->get_override("parameters")();}
                 inline void set_parameters(std::vector<double>, bool check_sz=true){this->get_override("set_parameters")();}
-                inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1){this->get_override("set_bounds")();}
-                inline int n_feats(){this->get_override("n_feats");}
-                inline std::shared_ptr<Node> feat(int ind){this->get_override("feat");}
-                inline void param_derivative(const double* params, double* dfdp){this->get_override("param_derivative");}
-                inline void gradient(double* grad, double* dfdp){this->get_override("gradient");}
-                inline void gradient(double* grad, double* dfdp, const double* params){this->get_override("gradient");}
-                inline std::vector<std::string> get_x_in_expr_list(){this->get_override("get_x_in_expr_list")();}
-                inline int n_leaves(int n_cur_leaves=0){this->get_override("n_cur_leaves")();}
+                inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1) const {this->get_override("set_bounds")();}
+                inline int n_feats() const {this->get_override("n_feats");}
+                inline std::shared_ptr<Node> feat(const int ind) const {this->get_override("feat");}
+                inline void param_derivative(const double* params, double* dfdp) const {this->get_override("param_derivative");}
+                inline void gradient(double* grad, double* dfdp) const {this->get_override("gradient");}
+                inline void gradient(double* grad, double* dfdp, const double* params) const {this->get_override("gradient");}
+                inline std::vector<std::string> get_x_in_expr_list() const {this->get_override("get_x_in_expr_list")();}
+                inline int n_leaves(int n_cur_leaves=0) const {this->get_override("n_cur_leaves")();}
 
             };
             /**
@@ -97,26 +101,76 @@ namespace sisso
             template<int N>
             struct OperatorNodeWrap : OperatorNode<N>, py::wrapper<OperatorNode<N>>
             {
-                inline void set_value(int offset=-1, bool for_comp=false){this->get_override("set_value")();}
-                inline void set_test_value(int offset=-1, bool for_comp=false){this->get_override("set_test_value")();}
-                inline void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1){this->get_override("set_value")();}
-                inline void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1){this->get_override("set_test_value")();}
-                inline NODE_TYPE type(){return this->get_override("type")();}
-                inline int rung(int cur_rung = 0){return this->get_override("rung")();}
-                inline Unit unit(){return this->get_override("unit")();}
-                inline std::string get_postfix_term(){return this->get_override("get_postfix_term")();}
-                inline std::string expr(double* params, int depth=1){return this->get_override("expr")();}
-                inline std::string expr(){return this->get_override("expr")();}
-                inline std::string get_latex_expr(double* params, int depth=1){return this->get_override("latex_expr")();}
-                inline std::string get_latex_expr(){return this->get_override("latex_expr")();}
-                inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot){this->get_override("update_add_sub_leaves")();}
-                inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot){this->get_override("update_div_mult_leaves")();}
+                inline void set_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_value")();}
+                inline void set_test_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_test_value")();}
+                inline void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1) const {this->get_override("set_value")();}
+                inline void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1) const {this->get_override("set_test_value")();}
+                inline NODE_TYPE type() const {return this->get_override("type")();}
+                inline int rung(int cur_rung = 0) const {return this->get_override("rung")();}
+                inline Unit unit() const {return this->get_override("unit")();}
+                inline std::string get_postfix_term() const {return this->get_override("get_postfix_term")();}
+                inline std::string expr(const double* params, const int depth=1) const {return this->get_override("expr")();}
+                inline std::string expr() const {return this->get_override("expr")();}
+                inline std::string get_latex_expr(const double* params, const int depth=1) const {return this->get_override("latex_expr")();}
+                inline std::string get_latex_expr() const {return this->get_override("latex_expr")();}
+                inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const {this->get_override("update_add_sub_leaves")();}
+                inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const {this->get_override("update_div_mult_leaves")();}
                 inline void get_parameters(std::shared_ptr<NLOptimizer> optimizer){this->get_override("get_parameters")();}
                 inline void set_parameters(std::vector<double>, bool check_sz=true){this->get_override("set_parameters")();}
-                inline std::vector<double> parameters(){return this->get_override("parameters")();}
-                inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1){this->get_override("set_bounds")();}
-                inline void param_derivative(const double* params, double* dfdp){this->get_override("param_derivative");}
+                inline std::vector<double> parameters() const {return this->get_override("parameters")();}
+                inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1) const {this->get_override("set_bounds")();}
+                inline void param_derivative(const double* params, double* dfdp) const {this->get_override("param_derivative");}
+            };
+            #else
+            /**
+             * @brief struct used wrap a Node object for conversion
+             */
+            struct NodeWrap :  Node, py::wrapper<Node>
+            {
+            public:
+                inline std::string expr() const {return this->get_override("expr")();}
+                inline std::string get_latex_expr() const {return this->get_override("latex_expr")();}
+                inline Unit unit() const {return this->get_override("unit")();}
+                inline std::vector<double> value() const {return this->get_override("value")();}
+                inline std::vector<double> test_value() const {return this->get_override("test_value")();}
+                inline void set_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_value")();}
+                inline void set_test_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_test_value")();}
+                inline double* value_ptr(int offset=-1, const bool for_comp=false) const {return this->get_override("value_ptr")();}
+                inline double* test_value_ptr(int offset=-1, const bool for_comp=false) const {return this->get_override("test_value_ptr")();}
+                inline bool is_nan() const {return this->get_override("is_nan")();}
+                inline bool is_const() const {return this->get_override("is_const")();}
+                inline NODE_TYPE type() const {return this->get_override("type")();}
+                inline int rung(int cur_rung = 0) const {return this->get_override("rung")();}
+                inline std::map<int, int> primary_feature_decomp() const {return this->get_override("primary_feature_decomp")();}
+                inline void update_primary_feature_decomp(std::map<int, int>& pf_decomp) const {this->get_override("update_primary_feature_decomp")();}
+                inline void update_postfix(std::string& cur_expr) const {this->get_override("update_postfix")();}
+                inline std::string get_postfix_term() const {return this->get_override("get_postfix_term")();}
+                inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const {this->get_override("update_add_sub_leaves")();}
+                inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const {this->get_override("update_div_mult_leaves")();}
+                inline int n_feats() const {this->get_override("n_feats");}
+                inline std::shared_ptr<Node> feat(const int ind) const {this->get_override("feat");}
+                inline std::vector<std::string> get_x_in_expr_list() const {this->get_override("get_x_in_expr_list")();}
+                inline int n_leaves(int n_cur_leaves=0) const {this->get_override("n_cur_leaves")();}
+
             };
+            /**
+             * @brief struct used wrap an OperatorNode object for conversion
+             */
+            template<int N>
+            struct OperatorNodeWrap : OperatorNode<N>, py::wrapper<OperatorNode<N>>
+            {
+                inline void set_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_value")();}
+                inline void set_test_value(int offset=-1, const bool for_comp=false) const {this->get_override("set_test_value")();}
+                inline NODE_TYPE type() const {return this->get_override("type")();}
+                inline int rung(int cur_rung = 0) const {return this->get_override("rung")();}
+                inline Unit unit() const {return this->get_override("unit")();}
+                inline std::string get_postfix_term() const {return this->get_override("get_postfix_term")();}
+                inline std::string expr() const {return this->get_override("expr")();}
+                inline std::string get_latex_expr() const {return this->get_override("latex_expr")();}
+                inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, const int pl_mn, int& expected_abs_tot) const {this->get_override("update_add_sub_leaves")();}
+                inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, const double fact, double& expected_abs_tot) const {this->get_override("update_div_mult_leaves")();}
+            };
+            #endif
 
             /**
              * @brief Register the Node for accessing the object via python
diff --git a/src/python/feature_creation/FeatureSpace.cpp b/src/python/feature_creation/FeatureSpace.cpp
index aad46786999756ae23ac5ca0b0b963600dd0bd6e..4fa6f9147351c23019e4623f989c26092c0cea97 100644
--- a/src/python/feature_creation/FeatureSpace.cpp
+++ b/src/python/feature_creation/FeatureSpace.cpp
@@ -150,7 +150,7 @@ FeatureSpace::FeatureSpace(
         }
     }
 
-    std::vector<int> rung_inds = util_funcs::argsort(rungs);
+    std::vector<int> rung_inds = util_funcs::argsort<int>(rungs);
     _max_phi = *std::max_element(rungs.begin(), rungs.end());
     node_value_arrs::MAX_RUNG = _max_phi;
 
@@ -257,7 +257,7 @@ FeatureSpace::FeatureSpace(
         }
     }
 
-    std::vector<int> rung_inds = util_funcs::argsort(rungs);
+    std::vector<int> rung_inds = util_funcs::argsort<int>(rungs);
     _max_phi = *std::max_element(rungs.begin(), rungs.end());
     node_value_arrs::MAX_RUNG = _max_phi;
 
diff --git a/src/utils/compare_features.cpp b/src/utils/compare_features.cpp
index a7710d0e995b7705f4e31bbdc4b287a3f3e475e6..832b2d7679f1964c8c3806892649f389ae114105 100644
--- a/src/utils/compare_features.cpp
+++ b/src/utils/compare_features.cpp
@@ -6,9 +6,9 @@ std::vector<int> comp_feats::INDEX;
 void comp_feats::set_is_valid_fxn(
     const std::string project_type,
     const double max_corr,
-    const int n_samp,
-    std::function<bool(double*, int, double, std::vector<double>&, double, int, int)>& is_valid,
-    std::function<bool(double*, int, double, std::vector<node_ptr>&, std::vector<double>&, double)>& is_valid_feat_list
+    const int n_samp,
+    std::function<bool(const double*, const int, const double, const std::vector<double>&, const double, const int, const int)>& is_valid,
+    std::function<bool(const double*, const int, const double, const std::vector<node_ptr>&, const std::vector<double>&, const double)>& is_valid_feat_list
 )
 {
     if(project_type.compare("classification") != 0)
@@ -44,10 +44,16 @@ void comp_feats::set_is_valid_fxn(
 
 
 bool comp_feats::valid_feature_against_selected_pearson_max_corr_1(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<double>& scores_sel,
+    const double cur_score,
+    const int end_sel,
+    const int start_sel
 )
 {
-    double mean = util_funcs::mean(val_ptr, n_samp);
+    double mean = util_funcs::mean<double>(val_ptr, n_samp);
     double stand_dev = util_funcs::stand_dev(val_ptr, n_samp, mean);
     double base_val = util_funcs::r(val_ptr, val_ptr, n_samp, mean, stand_dev, mean, stand_dev);
 
@@ -66,10 +72,15 @@ bool comp_feats::valid_feature_against_selected_pearson_max_corr_1(
 }
 
 bool comp_feats::valid_feature_against_selected_pearson_max_corr_1_feat_list(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_ptr>& selected,
+    const std::vector<double>& scores_sel,
+    const double cur_score
 )
 {
-    double mean = util_funcs::mean(val_ptr, n_samp);
+    double mean = util_funcs::mean<double>(val_ptr, n_samp);
     double stand_dev = util_funcs::stand_dev(val_ptr, n_samp, mean);
     double base_val = util_funcs::r(val_ptr, val_ptr, n_samp, mean, stand_dev, mean, stand_dev);
 
@@ -89,10 +100,14 @@ bool comp_feats::valid_feature_against_selected_pearson_max_corr_1_feat_list(
 }
 
 bool comp_feats::valid_feature_against_selected_pearson_max_corr_1_mpi_op(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_sc_pair>& out_vec,
+    const double cur_score
 )
 {
-    double mean = util_funcs::mean(val_ptr, n_samp);
+    double mean = util_funcs::mean<double>(val_ptr, n_samp);
     double stand_dev = util_funcs::stand_dev(val_ptr, n_samp, mean);
     double base_val = util_funcs::r(val_ptr, val_ptr, n_samp, mean, stand_dev, mean, stand_dev);
 
@@ -113,10 +128,16 @@ bool comp_feats::valid_feature_against_selected_pearson_max_corr_1_mpi_op(
 
 
 bool comp_feats::valid_feature_against_selected_pearson(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<double>& scores_sel,
+    const double cur_score,
+    const int end_sel,
+    const int start_sel
 )
 {
-    double mean = util_funcs::mean(val_ptr, n_samp);
+    double mean = util_funcs::mean<double>(val_ptr, n_samp);
     double stand_dev = util_funcs::stand_dev(val_ptr, n_samp, mean);
     double base_val = util_funcs::r(val_ptr, val_ptr, n_samp, mean, stand_dev, mean, stand_dev);
 
@@ -139,10 +160,15 @@ bool comp_feats::valid_feature_against_selected_pearson(
 }
 
 bool comp_feats::valid_feature_against_selected_pearson_feat_list(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_ptr>& selected,
+    const std::vector<double>& scores_sel,
+    const double cur_score
 )
 {
-    double mean = util_funcs::mean(val_ptr, n_samp);
+    double mean = util_funcs::mean<double>(val_ptr, n_samp);
     double stand_dev = util_funcs::stand_dev(val_ptr, n_samp, mean);
     double base_val = util_funcs::r(val_ptr, val_ptr, n_samp, mean, stand_dev, mean, stand_dev);
 
@@ -157,10 +183,14 @@ bool comp_feats::valid_feature_against_selected_pearson_feat_list(
 }
 
 bool comp_feats::valid_feature_against_selected_pearson_mpi_op(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_sc_pair>& out_vec,
+    const double cur_score
 )
 {
-    double mean = util_funcs::mean(val_ptr, n_samp);
+    double mean = util_funcs::mean<double>(val_ptr, n_samp);
     double stand_dev = util_funcs::stand_dev(val_ptr, n_samp, mean);
     double base_val = util_funcs::r(val_ptr, val_ptr, n_samp, mean, stand_dev, mean, stand_dev);
 
@@ -176,7 +206,13 @@ bool comp_feats::valid_feature_against_selected_pearson_mpi_op(
 
 
 bool comp_feats::valid_feature_against_selected_spearman_max_corr_1(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<double>& scores_sel,
+    const double cur_score,
+    const int end_sel,
+    const int start_sel
 )
 {
     double base_val = std::abs(
@@ -208,7 +244,12 @@ bool comp_feats::valid_feature_against_selected_spearman_max_corr_1(
 }
 
 bool comp_feats::valid_feature_against_selected_spearman_max_corr_1_feat_list(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_ptr>& selected,
+    const std::vector<double>& scores_sel,
+    const double cur_score
 )
 {
     double base_val = std::abs(
@@ -240,7 +281,11 @@ bool comp_feats::valid_feature_against_selected_spearman_max_corr_1_feat_list(
 }
 
 bool comp_feats::valid_feature_against_selected_spearman_max_corr_1_mpi_op(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_sc_pair>& out_vec,
+    const double cur_score
 )
 {
     double base_val = std::abs(
@@ -277,7 +322,13 @@ bool comp_feats::valid_feature_against_selected_spearman_max_corr_1_mpi_op(
 }
 
 bool comp_feats::valid_feature_against_selected_spearman(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<double>& scores_sel,
+    const double cur_score,
+    const int end_sel,
+    const int start_sel
 )
 {
     double base_val = std::abs(
@@ -312,7 +363,12 @@ bool comp_feats::valid_feature_against_selected_spearman(
 }
 
 bool comp_feats::valid_feature_against_selected_spearman_feat_list(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_ptr>& selected,
+    const std::vector<double>& scores_sel,
+    const double cur_score
 )
 {
     double base_val = std::abs(
@@ -342,7 +398,11 @@ bool comp_feats::valid_feature_against_selected_spearman_feat_list(
 }
 
 bool comp_feats::valid_feature_against_selected_spearman_mpi_op(
-    double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+    const double* val_ptr,
+    const int n_samp,
+    const double cross_cor_max,
+    const std::vector<node_sc_pair>& out_vec,
+    const double cur_score
 )
 {
     double base_val = std::abs(
diff --git a/src/utils/compare_features.hpp b/src/utils/compare_features.hpp
index b5f54eb5b339581fa003a275661b0cd664e1f49a..d21d806c221a328beb348277e999b6c8ab20f97c 100644
--- a/src/utils/compare_features.hpp
+++ b/src/utils/compare_features.hpp
@@ -32,8 +32,8 @@ namespace comp_feats
         const std::string project_type,
         const double max_corr,
         const int n_samp,
-        std::function<bool(double*, int, double, std::vector<double>&, double, int, int)>& is_valid,
-        std::function<bool(double*, int, double, std::vector<node_ptr>&, std::vector<double>&, double)>& is_valid_feat_list
+        std::function<bool(const double*, const int, const double, const std::vector<double>&, const double, const int, const int)>& is_valid,
+        std::function<bool(const double*, const int, const double, const std::vector<node_ptr>&, const std::vector<double>&, const double)>& is_valid_feat_list
     );
 
     /**
@@ -50,7 +50,13 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_pearson_max_corr_1(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel=0
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<double>& scores_sel,
+        const double cur_score,
+        const int end_sel,
+        const int start_sel=0
     );
 
     /**
@@ -66,7 +72,12 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_pearson_max_corr_1_feat_list(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_ptr>& selected,
+        const std::vector<double>& scores_sel,
+        const double cur_score
     );
 
     /**
@@ -82,7 +93,11 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_pearson_max_corr_1_mpi_op(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_sc_pair>& out_vec,
+        const double cur_score
     );
 
     /**
@@ -99,7 +114,13 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_pearson(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel=0
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<double>& scores_sel,
+        const double cur_score,
+        const int end_sel,
+        const int start_sel=0
     );
 
     /**
@@ -115,7 +136,12 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_pearson_feat_list(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_ptr>& selected,
+        const std::vector<double>& scores_sel,
+        const double cur_score
     );
 
     /**
@@ -131,7 +157,11 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_pearson_mpi_op(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_sc_pair>& out_vec,
+        const double cur_score
     );
 
     /**
@@ -148,7 +178,13 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_spearman_max_corr_1(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel=0
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<double>& scores_sel,
+        const double cur_score,
+        const int end_sel,
+        const int start_sel=0
     );
 
     /**
@@ -164,7 +200,12 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_spearman_max_corr_1_feat_list(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_ptr>& selected,
+        const std::vector<double>& scores_sel,
+        const double cur_score
     );
 
     /**
@@ -180,7 +221,11 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_spearman_max_corr_1_mpi_op(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_sc_pair>& out_vec,
+        const double cur_score
     );
 
     /**
@@ -197,7 +242,13 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_spearman(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<double>& scores_sel, double cur_score, int end_sel, int start_sel=0
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<double>& scores_sel,
+        const double cur_score,
+        const int end_sel,
+        const int start_sel=0
     );
 
     /**
@@ -213,7 +264,12 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_spearman_feat_list(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_ptr>& selected, std::vector<double>& scores_sel, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_ptr>& selected,
+        const std::vector<double>& scores_sel,
+        const double cur_score
     );
 
     /**
@@ -229,7 +285,11 @@ namespace comp_feats
      * @return True if the feature is still valid
      */
     bool valid_feature_against_selected_spearman_mpi_op(
-        double* val_ptr, int n_samp, double cross_cor_max, std::vector<node_sc_pair>& out_vec, double cur_score
+        const double* val_ptr,
+        const int n_samp,
+        const double cross_cor_max,
+        const std::vector<node_sc_pair>& out_vec,
+        const double cur_score
     );
 }
 
diff --git a/src/utils/math_funcs.cpp b/src/utils/math_funcs.cpp
index ba20d278c3fb222b6d5ce63645b51787e4c4e170..b853a0e27f2d756dfc31bdea4da23ac8f5a479a3 100644
--- a/src/utils/math_funcs.cpp
+++ b/src/utils/math_funcs.cpp
@@ -20,19 +20,19 @@ bool util_funcs::iterate(std::vector<int>& inds, int size, int incriment)
     return cont;
 }
 
-double util_funcs::log_r2(double* a, double* b, double* log_a, int size)
+double util_funcs::log_r2(const double* a, const double* b, double* log_a, const int size)
 {
     std::transform(a, a + size, log_a, [](double aa){return std::log(aa);});
     return r2(log_a, b, size);
 }
 
-double util_funcs::log_r2(double* a, double* b, double* log_a, int size, double mean_b, double std_b)
+double util_funcs::log_r2(const double* a, const double* b, double* log_a, const int size, const double mean_b, const double std_b)
 {
     std::transform(a, a + size, log_a, [](double aa){return std::log(aa);});
     return r2(b, log_a, size, mean_b, std_b);
 }
 
-double util_funcs::r(double* a, double* b, const std::vector<int>& sizes)
+double util_funcs::r(const double* a, const double* b, const std::vector<int>& sizes)
 {
     double result = 0.0;
     int pos = 0;
@@ -45,7 +45,7 @@ double util_funcs::r(double* a, double* b, const std::vector<int>& sizes)
     return std::sqrt(result / sizes.size());
 }
 
-double util_funcs::r(double* a, double* b, const std::vector<int>& sizes, std::vector<double>& mean_a, std::vector<double>& std_a)
+double util_funcs::r(const double* a, const double* b, const std::vector<int>& sizes, const std::vector<double>& mean_a, const std::vector<double>& std_a)
 {
     double result = 0.0;
     int pos = 0;
@@ -58,7 +58,14 @@ double util_funcs::r(double* a, double* b, const std::vector<int>& sizes, std::v
     return std::sqrt(result / sizes.size());
 }
 
-double util_funcs::r(double* a, double* b, const std::vector<int>& sizes, std::vector<double>& mean_a, std::vector<double>& std_a, std::vector<double>& mean_b, std::vector<double>& std_b)
+double util_funcs::r(
+    const double* a,
+    const double* b,
+    const std::vector<int>& sizes,
+    const std::vector<double>& mean_a,
+    const std::vector<double>& std_a,
+    const std::vector<double>& mean_b,
+    const std::vector<double>& std_b)
 {
     double result = 0.0;
     int pos = 0;
@@ -71,7 +78,7 @@ double util_funcs::r(double* a, double* b, const std::vector<int>& sizes, std::v
     return std::sqrt(result / sizes.size());
 }
 
-double util_funcs::r2(double* a, double* b, const std::vector<int>& sizes)
+double util_funcs::r2(const double* a, const double* b, const std::vector<int>& sizes)
 {
     double result = 0.0;
     int pos = 0;
@@ -84,7 +91,7 @@ double util_funcs::r2(double* a, double* b, const std::vector<int>& sizes)
     return result / sizes.size();
 }
 
-double util_funcs::r2(double* a, double* b, const std::vector<int>& sizes, std::vector<double>& mean_a, std::vector<double>& std_a)
+double util_funcs::r2(const double* a, const double* b, const std::vector<int>& sizes, const std::vector<double>& mean_a, const std::vector<double>& std_a)
 {
     double result = 0.0;
     int pos = 0;
@@ -97,7 +104,15 @@ double util_funcs::r2(double* a, double* b, const std::vector<int>& sizes, std::
     return result / sizes.size();
 }
 
-double util_funcs::r2(double* a, double* b, const std::vector<int>& sizes, std::vector<double>& mean_a, std::vector<double>& std_a, std::vector<double>& mean_b, std::vector<double>& std_b)
+double util_funcs::r2(
+    const double* a,
+    const double* b,
+    const std::vector<int>& sizes,
+    const std::vector<double>& mean_a,
+    const std::vector<double>& std_a,
+    const std::vector<double>& mean_b,
+    const std::vector<double>& std_b
+)
 {
     double result = 0.0;
     int pos = 0;
@@ -110,7 +125,7 @@ double util_funcs::r2(double* a, double* b, const std::vector<int>& sizes, std::
     return result / sizes.size();
 }
 
-double util_funcs::log_r2(double* a, double* b, double* log_a, const std::vector<int>& sizes)
+double util_funcs::log_r2(const double* a, const double* b, double* log_a, const std::vector<int>& sizes)
 {
     double result = 0.0;
     int pos = 0;
@@ -123,7 +138,9 @@ double util_funcs::log_r2(double* a, double* b, double* log_a, const std::vector
     return result / sizes.size();
 }
 
-double util_funcs::log_r2(double* a, double* b, double* log_a, const std::vector<int>& sizes, std::vector<double>& mean_b, std::vector<double>& std_b)
+double util_funcs::log_r2(
+    const double* a, const double* b, double* log_a, const std::vector<int>& sizes, const std::vector<double>& mean_b, const std::vector<double>& std_b
+)
 {
     double result = 0.0;
     int pos = 0;
@@ -136,7 +153,7 @@ double util_funcs::log_r2(double* a, double* b, double* log_a, const std::vector
     return result / sizes.size();
 }
 
-double util_funcs::r(double* a, double* b, const int* sz, int n_task)
+double util_funcs::r(const double* a, const double* b, const int* sz, const int n_task)
 {
     double result = 0.0;
     int pos = 0;
@@ -149,7 +166,7 @@ double util_funcs::r(double* a, double* b, const int* sz, int n_task)
     return std::sqrt(result / static_cast<double>(n_task));
 }
 
-double util_funcs::r(double* a, double* b, const int* sz, double* mean_a, double* std_a, int n_task)
+double util_funcs::r(const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const int n_task)
 {
     double result = 0.0;
     int pos = 0;
@@ -162,7 +179,9 @@ double util_funcs::r(double* a, double* b, const int* sz, double* mean_a, double
     return std::sqrt(result / static_cast<double>(n_task));
 }
 
-double util_funcs::r(double* a, double* b, const int* sz, double* mean_a, double* std_a, double* mean_b, double* std_b, int n_task)
+double util_funcs::r(
+    const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const double* mean_b, const double* std_b, const int n_task
+)
 {
     double result = 0.0;
     int pos = 0;
@@ -175,7 +194,7 @@ double util_funcs::r(double* a, double* b, const int* sz, double* mean_a, double
     return std::sqrt(result / static_cast<double>(n_task));
 }
 
-double util_funcs::r2(double* a, double* b, const int* sz, int n_task)
+double util_funcs::r2(const double* a, const double* b, const int* sz, const int n_task)
 {
     double result = 0.0;
     int pos = 0;
@@ -188,7 +207,7 @@ double util_funcs::r2(double* a, double* b, const int* sz, int n_task)
     return result / static_cast<double>(n_task);
 }
 
-double util_funcs::r2(double* a, double* b, const int* sz, double* mean_a, double* std_a, int n_task)
+double util_funcs::r2(const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const int n_task)
 {
     double result = 0.0;
     int pos = 0;
@@ -201,7 +220,9 @@ double util_funcs::r2(double* a, double* b, const int* sz, double* mean_a, doubl
     return result / static_cast<double>(n_task);
 }
 
-double util_funcs::r2(double* a, double* b, const int* sz, double* mean_a, double* std_a, double* mean_b, double* std_b, int n_task)
+double util_funcs::r2(
+    const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const double* mean_b, const double* std_b, const int n_task
+)
 {
     double result = 0.0;
     int pos = 0;
@@ -214,7 +235,7 @@ double util_funcs::r2(double* a, double* b, const int* sz, double* mean_a, doubl
     return result / static_cast<double>(n_task);
 }
 
-double util_funcs::log_r2(double* a, double* b, double* log_a, const int* sz, int n_task)
+double util_funcs::log_r2(const double* a, const double* b, double* log_a, const int* sz, const int n_task)
 {
     double result = 0.0;
     int pos = 0;
@@ -227,7 +248,7 @@ double util_funcs::log_r2(double* a, double* b, double* log_a, const int* sz, in
     return result / static_cast<double>(n_task);
 }
 
-double util_funcs::log_r2(double* a, double* b, double* log_a, const int* sz, double* mean_b, double* std_b, int n_task)
+double util_funcs::log_r2(const double* a, const double* b, double* log_a, const int* sz, const double* mean_b, const double* std_b, const int n_task)
 {
     double result = 0.0;
     int pos = 0;
@@ -240,24 +261,7 @@ double util_funcs::log_r2(double* a, double* b, double* log_a, const int* sz, do
     return result / static_cast<double>(n_task);
 }
 
-std::vector<int> util_funcs::argsort(std::vector<double>& vec)
-{
-    std::vector<int> index(vec.size());
-    std::iota(index.begin(), index.end(), 0);
-    std::sort(index.begin(), index.end(), [&vec](int i1, int i2){return vec[i1] < vec[i2];});
-
-    return index;
-}
-std::vector<int> util_funcs::argsort(std::vector<int>& vec)
-{
-    std::vector<int> index(vec.size());
-    std::iota(index.begin(), index.end(), 0);
-    std::sort(index.begin(), index.end(), [&vec](int i1, int i2){return vec[i1] < vec[i2];});
-
-    return index;
-}
-
-void util_funcs::rank(double* a, double* rank, int* index, int size)
+void util_funcs::rank(const double* a, double* rank, int* index, const int size)
 {
     std::iota(index, index + size, 0);
     std::sort(index, index + size, [a](int i1, int i2){return a[i1] < a[i2];});
@@ -279,14 +283,14 @@ void util_funcs::rank(double* a, double* rank, int* index, int size)
     }
 }
 
-double util_funcs::spearman_r(double* a, double* b, double* rank_a, double* rank_b, int* index, int size)
+double util_funcs::spearman_r(const double* a, const double* b, double* rank_a, double* rank_b, int* index, const int size)
 {
     rank(a, rank_a, index, size);
     rank(b, rank_b, index, size);
     return r(rank_a, rank_b, size);
 }
 
-double util_funcs::spearman_r(double* a, double* b, double* rank_a, double* rank_b, int* index, const std::vector<int>& sizes)
+double util_funcs::spearman_r(const double* a, const double* b, double* rank_a, double* rank_b, int* index, const std::vector<int>& sizes)
 {
     double result = 0.0;
     int pos = 0;
@@ -298,7 +302,7 @@ double util_funcs::spearman_r(double* a, double* b, double* rank_a, double* rank
     return result / static_cast<double>(sizes.size());
 }
 
-double util_funcs::spearman_r(double* a, double* b, double* rank_a, double* rank_b, int* index, const int* sz, int n_tasks)
+double util_funcs::spearman_r(const double* a, const double* b, double* rank_a, double* rank_b, int* index, const int* sz, const int n_tasks)
 {
     double result = 0.0;
     int pos = 0;
diff --git a/src/utils/math_funcs.hpp b/src/utils/math_funcs.hpp
index be50bbd1c7c33133b4747ba4fc3bcf7dd40eddcd..c505163456fa842d2fdeaddb5211c50fb205d2ac 100644
--- a/src/utils/math_funcs.hpp
+++ b/src/utils/math_funcs.hpp
@@ -18,9 +18,23 @@
 
 namespace util_funcs
 {
+    /**
+     * @brief Get the sign of a number
+     *
+     * @param number The number to get the sign of
+     * @return -1 for negative, 0 for 0 and 1 for positive
+     */
     template<typename T>
     inline T sign(T number){return (number > T(0)) - (number < T(0));};
 
+    /**
+     * @brief Round a number to arbitrary precision
+     *
+     * @param num The number to round
+     * @param n The precision
+     *
+     * @return The rounded number
+     */
     inline double round(double num, int n)
     {
         double exp = std::floor(std::log10(num));
@@ -33,24 +47,8 @@ namespace util_funcs
      * @param vec vector to find the mean of
      * @return the mean of the vector
      */
-    inline double mean(std::vector<double>& vec){return std::accumulate(vec.begin(), vec.end(), 0.0) / vec.size();};
-
-    /**
-     * @brief Find the mean of of a vector
-     *
-     * @param start pointer to the start of the vector
-     * @param size the size of the vector
-     * @return the mean of the vector
-     */
-    inline double mean(double* start, int size){return std::accumulate(start, start + size, 0.0) / size;};
-
-    /**
-     * @brief Find the mean of of a vector
-     *
-     * @param vec vec vector to find the mean of
-     * @return the mean of the vector
-     */
-    inline double mean(std::vector<int>& vec){return static_cast<double>(std::accumulate(vec.begin(), vec.end(), 0)) / vec.size();};
+    template<typename T>
+    inline double mean(const std::vector<T>& vec){return static_cast<double>(std::accumulate(vec.begin(), vec.end(), 0.0)) / static_cast<double>(vec.size());};
 
     /**
      * @brief Find the mean of of a vector
@@ -59,7 +57,8 @@ namespace util_funcs
      * @param size the size of the vector
      * @return the mean of the vector
      */
-    inline double mean(int* start, int size){return static_cast<double>(std::accumulate(start, start + size, 0)) / size;};
+    template<typename T>
+    inline double mean(const T* start, int size){return static_cast<double>(std::accumulate(start, start + size, 0.0)) / static_cast<double>(size);};
 
     /**
      * @brief Find the standard deviation of a vector
@@ -67,9 +66,9 @@ namespace util_funcs
      * @param vec The vector to calculate the stand deviation of.
      * @return the standard deviation
      */
-    inline double stand_dev(std::vector<double>& vec)
+    inline double stand_dev(const std::vector<double>& vec)
     {
-        double vec_mean = mean(vec);
+        double vec_mean = mean<double>(vec);
         return std::sqrt(
             std::accumulate(vec.begin(), vec.end(), 0.0, [&vec_mean](double total, double val){return total + std::pow(val - vec_mean, 2.0);}) / vec.size()
         );
@@ -81,9 +80,9 @@ namespace util_funcs
      * @param size size the size of the vector
      * @return The standard deviation of thwe vector
      */
-    inline double stand_dev(double* start, int size)
+    inline double stand_dev(const double* start, const int size)
     {
-        double vec_mean = mean(start, size);
+        double vec_mean = mean<double>(start, size);
         return std::sqrt(
             std::accumulate(start, start+size, 0.0, [&vec_mean](double total, double val){return total + std::pow(val - vec_mean, 2.0);}) / size
         );
@@ -96,7 +95,7 @@ namespace util_funcs
      * @param vec_mean The mean of the vector
      * @return the standard deviation
      */
-    inline double stand_dev(std::vector<double>& vec, double vec_mean)
+    inline double stand_dev(const std::vector<double>& vec, const double vec_mean)
     {
         return std::sqrt(
             std::accumulate(vec.begin(), vec.end(), 0.0, [&vec_mean](double total, double val){return total + std::pow(val - vec_mean, 2.0);}) / vec.size()
@@ -110,7 +109,7 @@ namespace util_funcs
      * @param vec_mean The mean of the vector
      * @return The standard deviation of thwe vector
      */
-    inline double stand_dev(double* start, int size, double vec_mean)
+    inline double stand_dev(const double* start, const int size, const double vec_mean)
     {
         return std::sqrt(std::accumulate(start, start+size, 0.0, [&vec_mean](double total, double val){return total + std::pow(val - vec_mean, 2.0);}) / size);
     };
@@ -121,7 +120,7 @@ namespace util_funcs
      * @param vec The vector to calculate the stand deviation of.
      * @return the norm of the vector
      */
-    inline double norm(std::vector<double>& vec)
+    inline double norm(const std::vector<double>& vec)
     {
         return std::sqrt(std::inner_product(vec.begin(), vec.end(), vec.begin(), 0.0));
     };
@@ -133,7 +132,7 @@ namespace util_funcs
      * @param size size the size of the vector
      * @return The norm of the vector
      */
-    inline double norm(double* start, int size)
+    inline double norm(const double* start, const int size)
     {
         return std::sqrt(std::inner_product(start, start + size, start, 0.0));
     };
@@ -146,10 +145,10 @@ namespace util_funcs
      * @param size the size of the vector
      * @return The correlation coefficient between vector a and vector b
      */
-    inline double r(double* a, double* b, int size)
+    inline double r(const double* a, const double* b, const int size)
     {
-        double mean_a = mean(a, size);
-        double mean_b = mean(b, size);
+        double mean_a = mean<double>(a, size);
+        double mean_b = mean<double>(b, size);
         return  1.0 / (static_cast<double>(size) * stand_dev(a, size, mean_a) * stand_dev(b, size, mean_b)) * std::inner_product(
             a,
             a + size,
@@ -168,9 +167,9 @@ namespace util_funcs
      * @param std_a the standard deviation of the a vector
      * @return The correlation coefficient between vector a and vector b
      */
-    inline double r(double* a, double* b, int size, double mean_a, double std_a)
+    inline double r(const double* a, const double* b, const int size, const double mean_a, const double std_a)
     {
-        double mean_b = mean(b, size);
+        double mean_b = mean<double>(b, size);
         return 1.0 / (static_cast<double>(size) * std_a * stand_dev(b, size, mean_b)) * std::inner_product(
             a,
             a + size,
@@ -191,7 +190,7 @@ namespace util_funcs
      * @param std_b the standard deviation of the b vector
      * @return The correlation coefficient between vector a and vector b
      */
-    inline double r(double* a, double* b, int size, double mean_a, double std_a, double mean_b, double std_b)
+    inline double r(const double* a, const double* b, const int size, const double mean_a, const double std_a, const double mean_b, const double std_b)
     {
         return 1.0 / (static_cast<double>(size) * std_a * std_b) * std::inner_product(
             a,
@@ -209,7 +208,7 @@ namespace util_funcs
      * @param sizes the sizes of the tasks to calculate the correlation on
      * @return The average Pearson correlations
      */
-    double r(double* a, double* b, const std::vector<int>& sizes);
+    double r(const double* a, const double* b, const std::vector<int>& sizes);
 
     /**
      * @brief The Pearson correlation for two vectors
@@ -221,7 +220,7 @@ namespace util_funcs
      * @param std_a the standard deviation of the a vector for each task
      * @return The average Pearson correlations
      */
-    double r(double* a, double* b, const std::vector<int>& sizes, std::vector<double>& mean_a, std::vector<double>& std_a);
+    double r(const double* a, const double* b, const std::vector<int>& sizes, const std::vector<double>& mean_a, const std::vector<double>& std_a);
 
     /**
      * @brief The Pearson correlation for two vectors
@@ -236,13 +235,13 @@ namespace util_funcs
      * @return The average Pearson correlations
      */
     double r(
-        double* a,
-        double* b,
+        const double* a,
+        const double* b,
         const std::vector<int>& sizes,
-        std::vector<double>& mean_a,
-        std::vector<double>& std_a,
-        std::vector<double>& mean_b,
-        std::vector<double>& std_b
+        const std::vector<double>& mean_a,
+        const std::vector<double>& std_a,
+        const std::vector<double>& mean_b,
+        const std::vector<double>& std_b
     );
 
     /**
@@ -254,7 +253,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Pearson correlations
      */
-    double r(double* a, double* b, const int* sz, int n_tasks);
+    double r(const double* a, const double* b, const int* sz, const int n_tasks);
 
     /**
      * @brief The Pearson correlation for two vectors
@@ -267,7 +266,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Pearson correlations
      */
-    double r(double* a, double* b, const int* sz, double* mean_a, double* std_a, int n_tasks);
+    double r(const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const int n_tasks);
 
     /**
      * @brief The Pearson correlation for two vectors
@@ -282,7 +281,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Pearson correlations
      */
-    double r(double* a, double* b, const int* sz, double* mean_a, double* std_a, double* mean_b, double* std_b, int n_tasks);
+    double r(const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const double* mean_b, const double* std_b, const int n_tasks);
 
     /**
      * @brief The Pearson correlation for two vectors
@@ -292,10 +291,10 @@ namespace util_funcs
      * @param size the size of the vector
      * @return The coefficient of determination between vector a and vector b
      */
-    inline double r2(double* a, double* b, int size)
+    inline double r2(const double* a, const double* b, const int size)
     {
-        double mean_a = mean(a, size);
-        double mean_b = mean(b, size);
+        double mean_a = mean<double>(a, size);
+        double mean_b = mean<double>(b, size);
         return std::pow(
             1.0 / (static_cast<double>(size) * stand_dev(a, size, mean_a) * stand_dev(b, size, mean_b)) *
             std::inner_product(
@@ -318,9 +317,9 @@ namespace util_funcs
      * @param std_a the standard deviation of the a vector
      * @return The coefficient of determination between vector a and vector b
      */
-    inline double r2(double* a, double* b, int size, double mean_a, double std_a)
+    inline double r2(const double* a, const double* b, const int size, const double mean_a, const double std_a)
     {
-        double mean_b = mean(b, size);
+        double mean_b = mean<double>(b, size);
         return std::pow(
             1.0  / (static_cast<double>(size) * std_a * stand_dev(b, size, mean_b)) *
             std::inner_product(
@@ -345,7 +344,7 @@ namespace util_funcs
      * @param std_b the standard deviation of the b vector
      * @return The coefficient of determination between vector a and vector b
      */
-    inline double r2(double* a, double* b, int size, double mean_a, double std_a, double mean_b, double std_b)
+    inline double r2(const double* a, const double* b, const int size, const double mean_a, const double std_a, const double mean_b, const double std_b)
     {
         return std::pow(
             std::inner_product(a, a + size, b, -1.0 * static_cast<double>(size) * mean_a * mean_b) / (static_cast<double>(size) * std_a * std_b),
@@ -361,7 +360,7 @@ namespace util_funcs
      * @param sizes the sizes of the tasks to calculate the correlation on
      * @return The average Coefficient of Determination
      */
-    double r2(double* a, double* b, const std::vector<int>& sizes);
+    double r2(const double* a, const double* b, const std::vector<int>& sizes);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors
@@ -373,7 +372,7 @@ namespace util_funcs
      * @param std_a the standard deviation of the a vector for each task
      * @return The average Coefficient of Determination
      */
-    double r2(double* a, double* b, const std::vector<int>& sizes, std::vector<double>& mean_a, std::vector<double>& std_a);
+    double r2(const double* a, const double* b, const std::vector<int>& sizes, const std::vector<double>& mean_a, const std::vector<double>& std_a);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors
@@ -388,13 +387,13 @@ namespace util_funcs
      * @return The average Coefficient of Determination
      */
     double r2(
-        double* a,
-        double* b,
+        const double* a,
+        const double* b,
         const std::vector<int>& sizes,
-        std::vector<double>& mean_a,
-        std::vector<double>& std_a,
-        std::vector<double>& mean_b,
-        std::vector<double>& std_b
+        const std::vector<double>& mean_a,
+        const std::vector<double>& std_a,
+        const std::vector<double>& mean_b,
+        const std::vector<double>& std_b
     );
 
     /**
@@ -406,7 +405,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Coefficient of Determination
      */
-    double r2(double* a, double* b, const int* sz, int n_tasks);
+    double r2(const double* a, const double* b, const int* sz, const int n_tasks);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors
@@ -419,7 +418,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Coefficient of Determination
      */
-    double r2(double* a, double* b, const int* sz, double* mean_a, double* std_a, int n_tasks);
+    double r2(const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const int n_tasks);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors
@@ -434,7 +433,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Coefficient of Determination
      */
-    double r2(double* a, double* b, const int* sz, double* mean_a, double* std_a, double* mean_b, double* std_b, int n_tasks);
+    double r2(const double* a, const double* b, const int* sz, const double* mean_a, const double* std_a, const double* mean_b, const double* std_b, const int n_tasks);
 
     /**
      * @brief Calculate the Coefficient of Determination between two vectors (For the log transformed problem)
@@ -445,7 +444,7 @@ namespace util_funcs
      * @param size the size of the vector
      * @return The Coefficient of Determination
      */
-    double log_r2(double* a, double* b, double* log_a, int size);
+    double log_r2(const double* a, const double* b, double* log_a, const int size);
 
     /**
      * @brief Calculate the Coefficient of Determination between two vectors (For the log transformed problem)
@@ -458,7 +457,7 @@ namespace util_funcs
      * @param std_b the standard deviation of the b vector for each task
      * @return The Coefficient of Determination
      */
-    double log_r2(double* a, double* b, double* log_a, int size, double mean_b, double std_b);
+    double log_r2(const double* a, const double* b, double* log_a, const int size, const double mean_b, const double std_b);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors (For the log transformed problem)
@@ -469,7 +468,7 @@ namespace util_funcs
      * @param sizes the sizes of the tasks to calculate the correlation on
      * @return The average Coefficient of Determination
      */
-    double log_r2(double* a, double* b, double* log_a, const std::vector<int>& sizes);
+    double log_r2(const double* a, const double* b, double* log_a, const std::vector<int>& sizes);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors (For the log transformed problem)
@@ -482,7 +481,7 @@ namespace util_funcs
      * @param std_b the standard deviation of the b vector for each task
      * @return The average Coefficient of Determination
      */
-    double log_r2(double* a, double* b, double* log_a, const std::vector<int>& sizes, std::vector<double>& mean_b, std::vector<double>& std_b);
+    double log_r2(const double* a, const double* b, double* log_a, const std::vector<int>& sizes, const std::vector<double>& mean_b, const std::vector<double>& std_b);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors (For the log transformed problem)
@@ -494,7 +493,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Coefficient of Determination
      */
-    double log_r2(double* a, double* b, double* log_a, const int* sz, int n_tasks);
+    double log_r2(const double* a, const double* b, double* log_a, const int* sz, const int n_tasks);
 
     /**
      * @brief Calculate the average Coefficient of Determination between two vectors (For the log transformed problem)
@@ -508,7 +507,7 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Coefficient of Determination
      */
-    double log_r2(double* a, double* b, double* log_a, const int* sz, double* mean_b, double* std_b, int n_tasks);
+    double log_r2(const double* a, const double* b, double* log_a, const int* sz, const double* mean_b, const double* std_b, const int n_tasks);
 
     /**
      * @brief Gets the rank variables for a vector
@@ -518,7 +517,7 @@ namespace util_funcs
      * @param index pointer to vector used store the sorted indexes
      * @param sz The size of the vector
      */
-    void rank(double* a, double* rank, int* index, int size);
+    void rank(const double* a, double* rank, int* index, const int size);
 
     /**
      * @brief Calculate the Spearman's rank correlation coefficient between two vectors
@@ -531,7 +530,7 @@ namespace util_funcs
      * @param size the size of the vector
      * @return The Coefficient of Determination
      */
-    double spearman_r(double* a, double* b, double* rank_a, double* rank_b, int* index, int size);
+    double spearman_r(const double* a, const double* b, double* rank_a, double* rank_b, int* index, const int size);
 
     /**
      * @brief Calculate the average Spearman's rank correlation coefficient between two vectors
@@ -544,7 +543,7 @@ namespace util_funcs
      * @param sizes the sizes of the tasks to calculate the correlation on
      * @return The average Coefficient of Determination
      */
-    double spearman_r(double* a, double* b, double* rank_a, double* rank_b, int* index, const std::vector<int>& sizes);
+    double spearman_r(const double* a, const double* b, double* rank_a, double* rank_b, int* index, const std::vector<int>& sizes);
 
     /**
      * @brief Calculate the average Spearman's rank correlation coefficient between two vectors
@@ -558,58 +557,34 @@ namespace util_funcs
      * @param n_tasks number of tasks to average over
      * @return The average Coefficient of Determination
      */
-    double spearman_r(double* a, double* b, double* rank_a, double* rank_b, int* index, const int* sz, int n_tasks);
-
-    /**
-     * @brief Sort a vector and return the indexes of the unsorted array that corresponds to the sorted one
-     *
-     * @param vec vector to sort
-     * @return The indexes of the sorted array
-     */
-    std::vector<int> argsort(std::vector<double>& vec);
+    double spearman_r(const double* a, const double* b, double* rank_a, double* rank_b, int* index, const int* sz, const int n_tasks);
 
     /**
      * @brief Sort a vector and return the indexes of the unsorted array that corresponds to the sorted one
      *
-     * @param begin the starting point for the sorting
-     * @param end the end point for the sorting
      * @param vec vector to sort
      * @return The indexes of the sorted array
      */
-    inline void argsort(int* begin, int* end, std::vector<double>& vec)
+    template<typename T>
+    std::vector<int> argsort(const std::vector<T>& vec)
     {
-        std::sort(begin, end, [&vec](int i1, int i2){return vec[i1] < vec[i2];});
-    }
+        std::vector<int> index(vec.size());
+        std::iota(index.begin(), index.end(), 0);
+        std::sort(index.begin(), index.end(), [&vec](int i1, int i2){return vec[i1] < vec[i2];});
 
-    /**
-     * @brief Sort a vector and return the indexes of the unsorted array that corresponds to the sorted one
-     *
-     * @param begin the starting point for the sorting
-     * @param end the end point for the sorting
-     * @param vec_begin start pointer to the vector to sort
-     */
-    inline void argsort(int* begin, int* end, double* vec_begin)
-    {
-        std::sort(begin, end, [vec_begin](int i1, int i2){return vec_begin[i1] < vec_begin[i2];});
+        return index;
     }
 
     /**
      * @brief Sort a vector and return the indexes of the unsorted array that corresponds to the sorted one
      *
-     * @param vec vector to sort
-     * @return The indexes of the sorted array
-     */
-    std::vector<int> argsort(std::vector<int>& vec);
-
-    /**
-     * @brief Sort a vector and return the indexes of the unsorted array that corresponds to the sorted one
-     *
-     * @param vec vector to sort
      * @param begin the starting point for the sorting
      * @param end the end point for the sorting
+     * @param vec vector to sort
      * @return The indexes of the sorted array
      */
-    inline void argsort(int* begin, int* end, std::vector<int>& vec)
+    template<typename T>
+    inline void argsort(int* begin, int* end, const std::vector<T>& vec)
     {
         std::sort(begin, end, [&vec](int i1, int i2){return vec[i1] < vec[i2];});
     }
@@ -621,12 +596,13 @@ namespace util_funcs
      * @param end the end point for the sorting
      * @param vec_begin start pointer to the vector to sort
      */
-    inline void argsort(int* begin, int* end, int* vec_begin)
+    template<typename T>
+    inline void argsort(int* begin, int* end, const T* vec_begin)
     {
         std::sort(begin, end, [vec_begin](int i1, int i2){return vec_begin[i1] < vec_begin[i2];});
     }
 
-    /**
+    /**
      * @brief The maximum absolute value of the vector
      *
      * @param start The starting point for the comparison
diff --git a/src/utils/project.cpp b/src/utils/project.cpp
index c8ed19a650126d7c6ee89c4cbcb42f646cd0b766..c79fb028fd7e272281217cad6ad85a9675d4a3b0 100644
--- a/src/utils/project.cpp
+++ b/src/utils/project.cpp
@@ -3,8 +3,8 @@
 void project_funcs::set_project_fxn(
     const std::string project_type,
     const int n_task,
-    std::function<void(double*, double*, std::vector<node_ptr>&, const std::vector<int>&, int)>& project,
-    std::function<void(double*, double*, std::vector<node_ptr>&, const std::vector<int>&, int)>& project_no_omp
+    std::function<void(const double*, double*, const std::vector<node_ptr>&, const std::vector<int>&, const int)>& project,
+    std::function<void(const double*, double*, const std::vector<node_ptr>&, const std::vector<int>&, const int)>& project_no_omp
 )
 {
     if(project_type.compare("regression") == 0)
@@ -29,7 +29,9 @@ void project_funcs::set_project_fxn(
 
 }
 
-void project_funcs::project_r(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_r(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
@@ -52,7 +54,7 @@ void project_funcs::project_r(double* prop, double* scores, std::vector<node_ptr
             int pos = 0;
             for(int tt = 0; tt < sizes.size(); ++tt)
             {
-                mean_prop[tt] = util_funcs::mean(prop + pos, sizes[tt]);
+                mean_prop[tt] = util_funcs::mean<double>(prop + pos, sizes[tt]);
                 std_prop[tt] = util_funcs::stand_dev(prop + pos, sizes[tt], mean_prop[tt]);
                 pos += sizes[tt];
             }
@@ -76,7 +78,9 @@ void project_funcs::project_r(double* prop, double* scores, std::vector<node_ptr
     }
 }
 
-void project_funcs::project_r2(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_r2(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
@@ -99,7 +103,7 @@ void project_funcs::project_r2(double* prop, double* scores, std::vector<node_pt
             int pos = 0;
             for(int tt = 0; tt < sizes.size(); ++tt)
             {
-                mean_prop[tt] = util_funcs::mean(prop + pos, sizes[tt]);
+                mean_prop[tt] = util_funcs::mean<double>(prop + pos, sizes[tt]);
                 std_prop[tt] = util_funcs::stand_dev(prop + pos, sizes[tt], mean_prop[tt]);
                 pos += sizes[tt];
             }
@@ -123,7 +127,9 @@ void project_funcs::project_r2(double* prop, double* scores, std::vector<node_pt
     }
 }
 
-void project_funcs::project_log_r2(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_log_r2(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
@@ -148,7 +154,7 @@ void project_funcs::project_log_r2(double* prop, double* scores, std::vector<nod
             int pos = 0;
             for(int tt = 0; tt < sizes.size(); ++tt)
             {
-                mean_prop[tt] = util_funcs::mean(prop + pos, sizes[tt]);
+                mean_prop[tt] = util_funcs::mean<double>(prop + pos, sizes[tt]);
                 std_prop[tt] = util_funcs::stand_dev(prop + pos, sizes[tt], mean_prop[tt]);
                 pos += sizes[tt];
             }
@@ -172,7 +178,9 @@ void project_funcs::project_log_r2(double* prop, double* scores, std::vector<nod
     }
 }
 
-void project_funcs::project_classify(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_classify(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
     std::fill_n(scores, phi.size(), std::numeric_limits<double>::max());
@@ -204,7 +212,9 @@ void project_funcs::project_classify(double* prop, double* scores, std::vector<n
     }
 }
 
-void project_funcs::project_r_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_r_no_omp(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
@@ -218,7 +228,7 @@ void project_funcs::project_r_no_omp(double* prop, double* scores, std::vector<n
         int pos = 0;
         for(int tt = 0; tt < sizes.size(); ++tt)
         {
-            mean_prop[tt] = util_funcs::mean(prop + pos, sizes[tt]);
+            mean_prop[tt] = util_funcs::mean<double>(prop + pos, sizes[tt]);
             std_prop[tt] = util_funcs::stand_dev(prop + pos, sizes[tt], mean_prop[tt]);
             pos += sizes[tt];
         }
@@ -239,7 +249,9 @@ void project_funcs::project_r_no_omp(double* prop, double* scores, std::vector<n
     std::transform(scores, scores + phi.size(), scores, [](double score){return std::isnan(score) ? 0.0 : score;});
 }
 
-void project_funcs::project_r2_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_r2_no_omp(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
@@ -253,7 +265,7 @@ void project_funcs::project_r2_no_omp(double* prop, double* scores, std::vector<
         int pos = 0;
         for(int tt = 0; tt < sizes.size(); ++tt)
         {
-            mean_prop[tt] = util_funcs::mean(prop + pos, sizes[tt]);
+            mean_prop[tt] = util_funcs::mean<double>(prop + pos, sizes[tt]);
             std_prop[tt] = util_funcs::stand_dev(prop + pos, sizes[tt], mean_prop[tt]);
             pos += sizes[tt];
         }
@@ -274,7 +286,9 @@ void project_funcs::project_r2_no_omp(double* prop, double* scores, std::vector<
     std::transform(scores, scores + phi.size(), scores, [](double score){return std::isnan(score) ? 0.0 : score;});
 }
 
-void project_funcs::project_log_r2_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_log_r2_no_omp(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
@@ -289,7 +303,7 @@ void project_funcs::project_log_r2_no_omp(double* prop, double* scores, std::vec
         int pos = 0;
         for(int tt = 0; tt < sizes.size(); ++tt)
         {
-            mean_prop[tt] = util_funcs::mean(prop + pos, sizes[tt]);
+            mean_prop[tt] = util_funcs::mean<double>(prop + pos, sizes[tt]);
             std_prop[tt] = util_funcs::stand_dev(prop + pos, sizes[tt], mean_prop[tt]);
             pos += sizes[tt];
         }
@@ -310,7 +324,9 @@ void project_funcs::project_log_r2_no_omp(double* prop, double* scores, std::vec
     std::transform(scores, scores + phi.size(), scores, [](double score){return std::isnan(score) ? 0.0 : score;});
 }
 
-void project_funcs::project_classify_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& sizes, int n_prop)
+void project_funcs::project_classify_no_omp(
+    const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& sizes, const int n_prop
+)
 {
     int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
 
diff --git a/src/utils/project.hpp b/src/utils/project.hpp
index 5929246436709b6610d5d613d7c175eb5470587f..6cf9a9d1937b410c77b469c998e0ec85ba6701ac 100644
--- a/src/utils/project.hpp
+++ b/src/utils/project.hpp
@@ -28,8 +28,8 @@ namespace project_funcs
     void set_project_fxn(
         const std::string project_type,
         const int n_task,
-        std::function<void(double*, double*, std::vector<node_ptr>&, const std::vector<int>&, int)>& project,
-        std::function<void(double*, double*, std::vector<node_ptr>&, const std::vector<int>&, int)>& project_no_omp
+        std::function<void(const double*, double*, const std::vector<node_ptr>&, const std::vector<int>&, const int)>& project,
+        std::function<void(const double*, double*, const std::vector<node_ptr>&, const std::vector<int>&, const int)>& project_no_omp
     );
 
     /**
@@ -41,7 +41,7 @@ namespace project_funcs
      * @param size Vector of the size of all of the tasks
      * @param n_prop The number of properties to calculate the Pearson Correlation of and return the maximum of
      */
-    void project_r(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_r(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate the projection scores of a set of features to a vector via Coefficient of Determination
@@ -52,7 +52,7 @@ namespace project_funcs
      * @param size Vector of the size of all of the tasks
      * @param n_prop The number of properties to calculate the Coefficient of Determination of and return the maximum of
      */
-    void project_r2(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_r2(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate the projection scores of a set of features to a vector via Coefficient of Determination (on the log transformed problem)
@@ -63,7 +63,7 @@ namespace project_funcs
      * @param size Vector of the size of all of the tasks
      * @param n_prop The number of properties to calculate the Coefficient of Determination of and return the maximum of
      */
-    void project_log_r2(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_log_r2(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate projection scores for classification
@@ -75,7 +75,7 @@ namespace project_funcs
      * @param size list of the sizes for each task
      * @param n_prop number of properties
      */
-    void project_classify(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_classify(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate the projection scores of a set of features to a vector via Pearson correlation
@@ -86,7 +86,7 @@ namespace project_funcs
      * @param size Vector of the size of all of the tasks
      * @param n_prop The number of properties to calculate the Pearson Correlation of and return the maximum of
      */
-    void project_r_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_r_no_omp(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate the projection scores of a set of features to a vector via Coefficient of Determination
@@ -97,7 +97,7 @@ namespace project_funcs
      * @param size Vector of the size of all of the tasks
      * @param n_prop The number of properties to calculate the Coefficient of Determination of and return the maximum of
      */
-    void project_r2_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_r2_no_omp(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate the projection scores of a set of features to a vector via Coefficient of Determination (on the log transform problem)
@@ -108,7 +108,7 @@ namespace project_funcs
      * @param size Vector of the size of all of the tasks
      * @param n_prop The number of properties to calculate the Coefficient of Determination of and return the maximum of
      */
-    void project_log_r2_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_log_r2_no_omp(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 
     /**
      * @brief Calculate projection scores for classification
@@ -119,7 +119,7 @@ namespace project_funcs
      * @param size list of the sizes for each task
      * @param n_prop number of properties
      */
-    void project_classify_no_omp(double* prop, double* scores, std::vector<node_ptr>& phi, const std::vector<int>& size, int n_prop);
+    void project_classify_no_omp(const double* prop, double* scores, const std::vector<node_ptr>& phi, const std::vector<int>& size, const int n_prop);
 }
 
 #endif
diff --git a/src/utils/string_utils.cpp b/src/utils/string_utils.cpp
index c3f7010a7c9401a8df3922c72cb95c4a01f5de8d..7fd20aa793437651918838b513b93d8ee638bb89 100644
--- a/src/utils/string_utils.cpp
+++ b/src/utils/string_utils.cpp
@@ -1,6 +1,6 @@
 #include <utils/string_utils.hpp>
 
-std::vector<std::string> str_utils::split_string_trim(std::string str, std::string split_tokens)
+std::vector<std::string> str_utils::split_string_trim(const std::string str, const std::string split_tokens)
 {
     std::vector<std::string> split_str;
     boost::algorithm::split(split_str, str, boost::algorithm::is_any_of(split_tokens));
@@ -12,7 +12,7 @@ std::vector<std::string> str_utils::split_string_trim(std::string str, std::stri
     return split_str;
 }
 
-std::string str_utils::latexify(std::string str)
+std::string str_utils::latexify(const std::string str)
 {
     std::string to_ret = "";
     std::vector<std::string> split_str = split_string_trim(str, " ");
diff --git a/src/utils/string_utils.hpp b/src/utils/string_utils.hpp
index 312801e843f741a91c1970afab519d1a0b357b75..53ce035fac1ecf34a36ba97b257b8867412d749e 100644
--- a/src/utils/string_utils.hpp
+++ b/src/utils/string_utils.hpp
@@ -24,7 +24,7 @@ namespace str_utils
      * @param split_tokens characters to split the string with
      * @return The vector with the string split along the split tokens
      */
-    std::vector<std::string> split_string_trim(std::string str, std::string split_tokens = ",;:");
+    std::vector<std::string> split_string_trim(const std::string str, const std::string split_tokens = ",;:");
 
     /**
      * @brief Convert a string into a latex string
@@ -32,7 +32,7 @@ namespace str_utils
      * @param str String to convert to a latex string
      * @return The latexified version of the string
      */
-    std::string latexify(std::string str);
+    std::string latexify(const std::string str);
 }