From c46736177ef32aa9a9e34b5448ba8da688dd7f95 Mon Sep 17 00:00:00 2001
From: Thomas <purcell@fhi-berlin.mpg.de>
Date: Wed, 24 Feb 2021 20:57:24 +0100
Subject: [PATCH] Update nlopt_wrapper

Remove the threadprivate vectors that caused the Valgrind errors.
They may not have been an actual leak, but it is not worth the risk.
Each OpenMP thread now builds its own NLOptimizer via
nlopt_wrapper::get_optimizer() and passes it down to the parameterized
nodes, so no threadprivate state remains.
---
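A minimal, self-contained sketch of the pattern this patch adopts: each
OpenMP thread owns its optimizer object instead of sharing threadprivate
vectors. `Optimizer` below is a hypothetical stand-in type; the actual API
introduced by this patch is nlopt_wrapper::get_optimizer(), which returns a
std::shared_ptr<NLOptimizer> (see the FeatureSpace.cpp hunks below).

    #include <memory>
    #include <vector>
    #include <omp.h>

    struct Optimizer {                 // stand-in for NLOptimizer
        std::vector<double> work;      // per-thread scratch, formerly threadprivate
        double optimize(double x) { work.assign(8, x); return x * x; }
    };

    int main() {
        std::vector<double> results(100);
        #pragma omp parallel
        {
            // One optimizer per thread: no shared mutable state for
            // Valgrind/Helgrind to flag, and no threadprivate lifetime issues.
            auto opt = std::make_shared<Optimizer>();
            #pragma omp for schedule(dynamic)
            for (int i = 0; i < 100; ++i)
                results[i] = opt->optimize(static_cast<double>(i));
        }
        return 0;
    }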
 .../feature_space/FeatureSpace.cpp            | 134 ++--
 .../feature_space/FeatureSpace.hpp            |  56 +-
 .../node/operator_nodes/OperatorNode.hpp      |  32 +-
 .../abs/absolute_value.hpp                    |   7 +-
 .../abs/parameterized_absolute_value.cpp      |  41 +-
 .../abs/parameterized_absolute_value.hpp      |  10 +-
 .../abs_diff/absolute_difference.hpp          |  11 +-
 .../parameterized_absolute_difference.cpp     |  30 +-
 .../parameterized_absolute_difference.hpp     |   9 +-
 .../allowed_operator_nodes/add/add.hpp        |  11 +-
 .../add/parameterized_add.cpp                 |  30 +-
 .../add/parameterized_add.hpp                 |   9 +-
 .../allowed_operator_nodes/cb/cube.hpp        |   7 +-
 .../cb/parameterized_cube.cpp                 |  26 +-
 .../cb/parameterized_cube.hpp                 |   9 +-
 .../allowed_operator_nodes/cbrt/cube_root.hpp |   7 +-
 .../cbrt/parameterized_cube_root.cpp          |  32 +-
 .../cbrt/parameterized_cube_root.hpp          |   9 +-
 .../allowed_operator_nodes/cos/cos.hpp        |   7 +-
 .../cos/parameterized_cos.cpp                 |  24 +-
 .../cos/parameterized_cos.hpp                 |   9 +-
 .../allowed_operator_nodes/div/divide.hpp     |  11 +-
 .../div/parameterized_divide.cpp              |  30 +-
 .../div/parameterized_divide.hpp              |   9 +-
 .../exp/exponential.hpp                       |   7 +-
 .../exp/parameterized_exponential.cpp         |  26 +-
 .../exp/parameterized_exponential.hpp         |   9 +-
 .../allowed_operator_nodes/inv/inverse.hpp    |   7 +-
 .../inv/parameterized_inverse.cpp             |  26 +-
 .../inv/parameterized_inverse.hpp             |   9 +-
 .../allowed_operator_nodes/log/log.hpp        |   7 +-
 .../log/parameterized_log.cpp                 |  26 +-
 .../log/parameterized_log.hpp                 |   9 +-
 .../allowed_operator_nodes/mult/multiply.hpp  |  11 +-
 .../mult/parameterized_multiply.cpp           |  30 +-
 .../mult/parameterized_multiply.hpp           |   9 +-
 .../neg_exp/negative_exponential.hpp          |   7 +-
 .../parameterized_negative_exponential.cpp    |  26 +-
 .../parameterized_negative_exponential.hpp    |   9 +-
 .../sin/parameterized_sin.cpp                 |  26 +-
 .../sin/parameterized_sin.hpp                 |   9 +-
 .../allowed_operator_nodes/sin/sin.hpp        |   7 +-
 .../six_pow/parameterized_sixth_power.cpp     |  26 +-
 .../six_pow/parameterized_sixth_power.hpp     |   9 +-
 .../six_pow/sixth_power.hpp                   |   7 +-
 .../sq/parameterized_square.cpp               |  27 +-
 .../sq/parameterized_square.hpp               |   9 +-
 .../allowed_operator_nodes/sq/square.hpp      |   7 +-
 .../sqrt/parameterized_square_root.cpp        |  30 +-
 .../sqrt/parameterized_square_root.hpp        |   9 +-
 .../sqrt/square_root.hpp                      |   7 +-
 .../sub/parameterized_subtract.cpp            |  30 +-
 .../sub/parameterized_subtract.hpp            |   9 +-
 .../allowed_operator_nodes/sub/subtract.hpp   |  11 +-
 .../node/operator_nodes/allowed_ops.hpp       |   4 +-
 src/nl_opt/NLOptWrapper.cpp                   | 348 ++++++++--
 src/nl_opt/NLOptWrapper.hpp                   | 614 +++++++++++-------
 src/python/__init__.py                        |   2 +-
 src/python/bindings_docstring_keyed.cpp       |  64 +-
 src/python/bindings_docstring_keyed.hpp       |  23 +-
 src/python/feature_creation/FeatureSpace.cpp  |  17 +-
 .../parameterization/test_abs_diff_node.cc    |  23 +-
 .../parameterization/test_abs_node.cc         |  20 +-
 .../parameterization/test_add_node.cc         |  19 +-
 .../parameterization/test_cb_node.cc          |  19 +-
 .../parameterization/test_cbrt_node.cc        |  19 +-
 .../parameterization/test_cos_node.cc         |  27 +-
 .../parameterization/test_div_node.cc         |  19 +-
 .../parameterization/test_exp_node.cc         |  31 +-
 .../parameterization/test_inv_node.cc         |  19 +-
 .../parameterization/test_log_node.cc         |  29 +-
 .../parameterization/test_mult_node.cc        |  19 +-
 .../parameterization/test_neg_exp_node.cc     |  32 +-
 .../parameterization/test_sin_node.cc         |  27 +-
 .../parameterization/test_six_pow_node.cc     |  19 +-
 .../parameterization/test_sq_node.cc          |  19 +-
 .../parameterization/test_sqrt_node.cc        |  19 +-
 .../parameterization/test_sub_node.cc         |  20 +-
 .../test_regressor.py                         |   2 +-
 .../test_feature_space/test_feature_space.py  |   5 -
 .../test_parameterize/test_lorentizan.py      |   8 +-
 .../test_parameterize/test_param_abs.py       |  10 +-
 .../test_parameterize/test_param_abs_diff.py  |   8 +-
 .../test_parameterize/test_param_add.py       |   8 +-
 .../test_parameterize/test_param_cb.py        |   8 +-
 .../test_parameterize/test_param_cbrt.py      |   8 +-
 .../test_parameterize/test_param_cos.py       |   6 +-
 .../test_parameterize/test_param_div.py       |   8 +-
 .../test_parameterize/test_param_exp.py       |   8 +-
 .../test_parameterize/test_param_inv.py       |   8 +-
 .../test_parameterize/test_param_log.py       |   8 +-
 .../test_parameterize/test_param_neg_exp.py   |  12 +-
 .../test_parameterize/test_param_sin.py       |   6 +-
 .../test_parameterize/test_param_six_pow.py   |   6 +-
 .../test_parameterize/test_param_sq.py        |   8 +-
 .../test_parameterize/test_param_sqrt.py      |   8 +-
 .../test_parameterize/test_param_sub.py       |   8 +-
 .../test_abs_diff_param.py                    |   4 +-
 .../test_param_model_node/test_abs_param.py   |   4 +-
 .../test_param_model_node/test_add_param.py   |   4 +-
 .../test_binary_binary_param.py               |   4 +-
 .../test_binary_unary_param.py                |   4 +-
 .../test_param_model_node/test_cb_param.py    |   4 +-
 .../test_param_model_node/test_cbrt_param.py  |   4 +-
 .../test_param_model_node/test_cos_param.py   |   4 +-
 .../test_param_model_node/test_div_param.py   |   4 +-
 .../test_param_model_node/test_exp_param.py   |   4 +-
 .../test_param_model_node/test_inv_param.py   |   4 +-
 .../test_param_model_node/test_log_param.py   |   4 +-
 .../test_param_model_node/test_mult_param.py  |   4 +-
 .../test_neg_exp_param.py                     |   4 +-
 .../test_param_model_node/test_sin_param.py   |   4 +-
 .../test_six_pow_param.py                     |   4 +-
 .../test_param_model_node/test_sq_param.py    |   4 +-
 .../test_param_model_node/test_sqrt_param.py  |   4 +-
 .../test_param_model_node/test_sub_param.py   |   4 +-
 .../test_unary_binary_param.py                |   4 +-
 .../test_unary_unary_param.py                 |   4 +-
 118 files changed, 1592 insertions(+), 1088 deletions(-)
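The per-node diffs below all follow one template: get_parameters(std::vector<double>&
prop) becomes get_parameters(std::shared_ptr<NLOptimizer> optimizer), and the body
delegates to optimizer->optimize_feature_params(this). (AbsParamNode is the one
exception: it runs the fit twice, once per sign of _sign_alpha, and keeps the better
result.) A compilable sketch of that shape, with NLOptimizer reduced to a hypothetical
stub, only the optimize_feature_params name being the patch's own, follows:

    #include <memory>
    #include <vector>

    class ParamNode;

    // Hypothetical stub; in the patch, NLOptimizer lives in NLOptWrapper.hpp.
    class NLOptimizer {
    public:
        double optimize_feature_params(ParamNode* feat);
    };

    class ParamNode {
    public:
        std::vector<double> _params = std::vector<double>(2, 0.0);

        // Shape shared by every *ParamNode::get_parameters after this patch:
        // the node hands itself to the per-thread optimizer, which fills
        // _params in place and returns the fit residual.
        void get_parameters(std::shared_ptr<NLOptimizer> optimizer) {
            optimizer->optimize_feature_params(this);
        }
    };

    double NLOptimizer::optimize_feature_params(ParamNode* feat) {
        feat->_params = {1.0, 0.0};  // dummy "fit" for the sketch
        return 0.0;                  // dummy residual
    }

    int main() {
        ParamNode node;
        node.get_parameters(std::make_shared<NLOptimizer>());
        return 0;
    }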

diff --git a/src/feature_creation/feature_space/FeatureSpace.cpp b/src/feature_creation/feature_space/FeatureSpace.cpp
index ecf32ff7..e943d3d9 100644
--- a/src/feature_creation/feature_space/FeatureSpace.cpp
+++ b/src/feature_creation/feature_space/FeatureSpace.cpp
@@ -65,6 +65,7 @@ FeatureSpace::FeatureSpace(
     _start_gen(1, 0),
+    _project_type(project_type),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
     _mpi_comm(mpi_comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(min_abs_feat_val),
@@ -77,10 +78,10 @@ FeatureSpace::FeatureSpace(
     _n_rung_generate(n_rung_generate),
     _max_param_depth(max_param_depth)
 {
-    initialize_fs(project_type);
+    initialize_fs();
 }
 
-void FeatureSpace::initialize_fs(std::string project_type)
+void FeatureSpace::initialize_fs()
 {
     #ifndef PARAMETERIZE
         if(_allowed_param_ops.size() != 0)
@@ -90,7 +91,7 @@ void FeatureSpace::initialize_fs(std::string project_type)
             _max_param_depth = _max_phi;
         if((_max_param_depth < 0) || (_max_param_depth > _max_phi))
             throw std::logic_error("Invalid parameter depth.");
-        nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, _max_param_depth);
+        nlopt_wrapper::MAX_PARAM_DEPTH = _max_param_depth;
     #endif
 
     if(_n_rung_store == -1)
@@ -112,17 +113,17 @@ void FeatureSpace::initialize_fs(std::string project_type)
         sum_file_stream.close();
     }
 
-    if(project_type.compare("regression") == 0)
+    if(_project_type.compare("regression") == 0)
     {
         _project = project_funcs::project_r2;
         _project_no_omp = project_funcs::project_r2_no_omp;
     }
-    else if(project_type.compare("classification") == 0)
+    else if(_project_type.compare("classification") == 0)
     {
         _project = project_funcs::project_classify;
         _project_no_omp = project_funcs::project_classify_no_omp;
     }
-    else if(project_type.compare("log_regression") == 0)
+    else if(_project_type.compare("log_regression") == 0)
     {
         if(_task_sizes.size() > 1)
             throw std::logic_error("Log Regression can not be done using multiple tasks.");
@@ -183,47 +184,73 @@ void FeatureSpace::initialize_fs(std::string project_type)
     _scores.resize(_phi.size());
 }
 
-void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound, double u_bound)
-{
-    int phi_ind = feat - _phi.begin();
-    feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
-
-    for(auto& op : _un_operators)
+#ifdef PARAMETERIZE
+    void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, std::shared_ptr<NLOptimizer> optimizer, double l_bound, double u_bound)
     {
-        op(feat_set, *feat, feat_ind, l_bound, u_bound);
-    }
+        int phi_ind = feat - _phi.begin();
+        feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
 
-    for(auto& op : _com_bin_operators)
-    {
-        for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
-            op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
-    }
+        for(auto& op : _un_operators)
+        {
+            op(feat_set, *feat, feat_ind, l_bound, u_bound);
+        }
 
-    for(auto& op : _bin_operators)
-    {
-        for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+        for(auto& op : _com_bin_operators)
         {
-            op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
-            op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+        }
+
+        for(auto& op : _bin_operators)
+        {
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+            {
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
+            }
         }
-    }
-    #ifdef PARAMETERIZE
         for(auto& op : _un_param_operators)
-            op(feat_set, *feat, feat_ind, l_bound, u_bound, _prop);
+            op(feat_set, *feat, feat_ind, l_bound, u_bound, optimizer);
 
         for(auto& op : _com_bin_param_operators)
             for(auto feat_2 = _phi.begin(); feat_2 != feat; ++feat_2)
-                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, _prop);
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, optimizer);
         for(auto& op : _bin_param_operators)
         {
             for(auto feat_2 = _phi.begin(); feat_2 != feat; ++feat_2)
             {
-                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, _prop);
-                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound, _prop);
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, optimizer);
+                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound, optimizer);
             }
         }
-    #endif
-}
+    }
+#else
+    void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound, double u_bound)
+    {
+        int phi_ind = feat - _phi.begin();
+        feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
+
+        for(auto& op : _un_operators)
+        {
+            op(feat_set, *feat, feat_ind, l_bound, u_bound);
+        }
+
+        for(auto& op : _com_bin_operators)
+        {
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+        }
+
+        for(auto& op : _bin_operators)
+        {
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+            {
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
+            }
+        }
+    }
+#endif
 
 void FeatureSpace::generate_feature_space()
 {
@@ -245,16 +272,31 @@ void FeatureSpace::generate_feature_space()
         int feat_ind = _phi.size();
         node_value_arrs::clear_temp_reg();
         double start = omp_get_wtime();
-        #pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
-        {
-            std::vector<node_ptr> next_phi_private;
-            #pragma omp for schedule(dynamic)
-            for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
-                generate_new_feats(feat_1, next_phi_private, feat_ind, l_bound, u_bound);
+        #ifdef PARAMETERIZE
+            #pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
+            {
+                std::vector<node_ptr> next_phi_private;
+                std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
 
-            #pragma omp critical
-            next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
-        }
+                #pragma omp for schedule(dynamic)
+                for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
+                    generate_new_feats(feat_1, next_phi_private, feat_ind, optimizer, l_bound, u_bound);
+
+                #pragma omp critical
+                next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
+            }
+        #else
+            #pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
+            {
+                std::vector<node_ptr> next_phi_private;
+                #pragma omp for schedule(dynamic)
+                for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
+                    generate_new_feats(feat_1, next_phi_private, feat_ind, l_bound, u_bound);
+
+                #pragma omp critical
+                next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
+            }
+        #endif
         _start_gen.push_back(_phi.size());
         node_value_arrs::clear_temp_reg();
         if((nn < _max_phi) || (nn <= _n_rung_store) || (_mpi_comm->size() == 1))
@@ -523,6 +565,10 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
         std::vector<node_ptr> phi_sel_private(phi_sel);
         std::vector<double> scores_sel_private(scores_sel);
 
+        #ifdef PARAMETERIZE
+        std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
+        #endif
+
         int feat_ind = _phi.size();
 
         #pragma omp for schedule(dynamic)
@@ -533,7 +579,11 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
 
             bool is_sel = (*feat)->selected();
             (*feat)->set_selected(false);
-            generate_new_feats(feat, generated_phi, feat_ind, _l_bound, _u_bound);
+            #ifdef PARAMETERIZE
+                generate_new_feats(feat, generated_phi, feat_ind, optimizer, _l_bound, _u_bound);
+            #else
+                generate_new_feats(feat, generated_phi, feat_ind, _l_bound, _u_bound);
+            #endif
             (*feat)->set_selected(is_sel);
 
             if(generated_phi.size() == 0)
diff --git a/src/feature_creation/feature_space/FeatureSpace.hpp b/src/feature_creation/feature_space/FeatureSpace.hpp
index d043ee84..aeccb2ac 100644
--- a/src/feature_creation/feature_space/FeatureSpace.hpp
+++ b/src/feature_creation/feature_space/FeatureSpace.hpp
@@ -61,6 +61,7 @@ class FeatureSpace
 
     const std::vector<int> _task_sizes; //!< The number of elements in each task (training data)
     std::vector<int> _start_gen; //!< list of the indexes where each generation starts in _phi
+    const std::string _project_type; //!< The type of projection that should be done during SIS
     const std::string _feature_space_file; //!< File to store information about the selected features
     const std::string _feature_space_summary_file; //!< File to store information about the selected features
 
@@ -126,10 +127,8 @@ public:
 
     /**
      * @brief Initialize the feature set given a property vector
-     *
-     * @param prop The property trying to be learned
      */
-    void initialize_fs(std::string project_type);
+    void initialize_fs();
 
     /**
      * @brief Generate the full feature set from the allowed operators and initial feature set
@@ -221,18 +220,45 @@ public:
      */
     inline int n_rung_generate(){return _n_rung_generate;}
 
-    /**
-     * @brief Generate a new set of features from a single feature
-     * @details Take in the feature and perform all valid algebraic operations on it.
-     *
-     * @param feat The feature to spawn new features from
-     * @param feat_set The feature set to pull features from for combinations
-     * @param feat_ind starting index for the next feature generated
-     * @param l_bound lower bound for the absolute value of the feature
-     * @param u_bound upper bound for the abosulte value of the feature
-     */
-    void generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound=1e-50, double u_bound=1e50);
-
+    #ifdef PARAMETERIZE
+        /**
+         * @brief Generate a new set of features from a single feature
+         * @details Take in the feature and perform all valid algebraic operations on it.
+         *
+         * @param feat The feature to spawn new features from
+         * @param feat_set The feature set to pull features from for combinations
+         * @param feat_ind starting index for the next feature generated
+         * @param optimizer The object used to optimize the parameterized features
+         * @param l_bound lower bound for the absolute value of the feature
+         * @param u_bound upper bound for the absolute value of the feature
+         */
+        void generate_new_feats(
+            std::vector<node_ptr>::iterator& feat,
+            std::vector<node_ptr>& feat_set,
+            int& feat_ind,
+            std::shared_ptr<NLOptimizer> optimizer,
+            double l_bound=1e-50,
+            double u_bound=1e50
+        );
+    #else
+        /**
+         * @brief Generate a new set of features from a single feature
+         * @details Take in the feature and perform all valid algebraic operations on it.
+         *
+         * @param feat The feature to spawn new features from
+         * @param feat_set The feature set to pull features from for combinations
+         * @param feat_ind starting index for the next feature generated
+         * @param l_bound lower bound for the absolute value of the feature
+     * @param u_bound upper bound for the absolute value of the feature
+         */
+        void generate_new_feats(
+            std::vector<node_ptr>::iterator& feat,
+            std::vector<node_ptr>& feat_set,
+            int& feat_ind,
+            double l_bound=1e-50,
+            double u_bound=1e50
+        );
+    #endif
     /**
      * @brief Calculate the SIS Scores for feature generated on the fly
      * @details Create the next rung of features and calculate their projection scores. Only keep those that can be selected by SIS.
diff --git a/src/feature_creation/node/operator_nodes/OperatorNode.hpp b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
index 5f653503..8fbf1cb8 100644
--- a/src/feature_creation/node/operator_nodes/OperatorNode.hpp
+++ b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
@@ -337,13 +337,14 @@ public:
          */
         virtual std::vector<double> parameters() = 0;
 
+        // DocString: op_node_get_params
         /**
          * @brief Solve the non-linear optimization to set the parameters
          * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
          *
          * @param prop property to fit to get the parameters
          */
-        virtual void get_parameters(std::vector<double>& prop) = 0;
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer) = 0;
 
         /**
          * @brief Set the non-linear parameters
@@ -357,7 +358,7 @@ public:
          */
         virtual inline int n_params(int n_cur = 0, int depth = 1)
         {
-            return (depth > nlopt_wrapper::_max_param_depth) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
+            return (depth > nlopt_wrapper::MAX_PARAM_DEPTH) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
         }
 
         /**
@@ -488,33 +489,6 @@ public:
         }
 
         #ifdef PY_BINDINGS
-            // DocString: op_node_param_arr
-            /**
-             * @brief Solve the non-linear optimization to set the parameters
-             * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-             *
-             * @param prop property to fit to get the parameters
-             * @param param_list List describing the parameters to fit
-             */
-            inline void get_parameters(np::ndarray prop)
-            {
-                std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-                get_parameters(prop_vec);
-            }
-
-            // DocString: op_node_param_list
-            /**
-             * @brief Solve the non-linear optimization to set the parameters
-             * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-             *
-             * @param prop property to fit to get the parameters
-             * @param param_list List describing the parameters to fit
-             */
-            inline void get_parameters(py::list prop)
-            {
-                std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-                get_parameters(prop_vec);
-            }
 
             // DocString: op_node_set_param_list
             /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
index 6d5b66ab..2a6a188c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
@@ -156,7 +156,8 @@ public:
          *
          * @param prop property to fit to get the parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -191,7 +192,7 @@ public:
             return fmt::format(
                 "|{:.10e}*{}{:+15.10e}|",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -209,7 +210,7 @@ public:
             return fmt::format(
                 "\\left(\\left|{:.3e}{}{:+8.3e}\\right|\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
index 58cd9759..17417f96 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsParamNode)
 
-void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,40 +17,40 @@ void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 AbsParamNode::AbsParamNode()
 {}
 
-AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
-    AbsNode(feat, feat_ind)
+AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+    AbsNode(feat, feat_ind),
+    _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
-    AbsNode(feat, feat_ind)
+AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+    AbsNode(feat, feat_ind),
+    _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
-    AbsNode(feat, feat_ind)
+    AbsNode(feat, feat_ind),
+    _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
 }
 
-void AbsParamNode::get_parameters(std::vector<double>& prop)
+void AbsParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    _sign_alpha = 1.0;
+    double min_res = optimizer->optimize_feature_params(this);
     std::vector<double> param_cp(_params);
 
     _sign_alpha = -1.0;
-    if(nlopt_wrapper::optimize_feature_params(d) > min_res)
+    if(optimizer->optimize_feature_params(this) > min_res)
     {
         std::copy_n(param_cp.data(), param_cp.size(), _params.data());
         _sign_alpha = 1.0;
@@ -61,7 +61,7 @@ void AbsNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::abs(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +71,7 @@ void AbsNode::set_value(const double* params, int offset, int depth)
 void AbsNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
     allowed_op_funcs::abs(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,9 +81,8 @@ void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
-
     _feats[0]->set_bounds(lb + 2, ub + 2);
 }
 
@@ -92,7 +91,7 @@ void AbsParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
index ab5ae9af..4c0ec0c9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
@@ -63,7 +63,7 @@ public:
      * @param param_list The list of parameters to optimize using non-linear least squares
      * @param prop The property to fit to
      */
-    AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -73,7 +73,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param prop The property to fit to
      */
-    AbsParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    AbsParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -146,7 +146,8 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -177,7 +178,6 @@ public:
      */
     void update_postfix(std::string& cur_expr, bool add_params=true)
     {
-
         std::stringstream postfix;
         postfix << get_postfix_term();
         if(add_params)
@@ -191,6 +191,6 @@ public:
     }
 };
 
-void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
index 4dc4c0e2..11f6bfb3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
@@ -167,7 +167,8 @@ public:
          *
          * @param prop property to fit to get the parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -201,9 +202,9 @@ public:
         {
             return fmt::format(
                 "|{} - ({:.10e}*{}{:+15.10e})|",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -220,9 +221,9 @@ public:
         {
             return fmt::format(
                 "\\left(\\left|{} - \\left({:.3e}{}{:+8.3e}\\right)\\right|\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
index e2b8350e..438b8f01 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsDiffParamNode)
 
-void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<AbsDiffParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<AbsDiffParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
@@ -32,7 +32,7 @@ void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1,
 AbsDiffParamNode::AbsDiffParamNode()
 {}
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop):
+AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     std::map<std::string, int> add_sub_leaves;
@@ -44,7 +44,7 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_in
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     double* params = _params.data();
     double* val_ptr = value_ptr();
@@ -58,11 +58,11 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_in
         throw InvalidFeatureException();
 }
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop):
+AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound):
@@ -71,13 +71,9 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_in
     _params.resize(n_params(), 0.0);
 }
 
-void AbsDiffParamNode::get_parameters(std::vector<double>& prop)
+void AbsDiffParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void AbsDiffNode::set_value(const double* params, int offset, int depth)
@@ -85,8 +81,8 @@ void AbsDiffNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::abs_diff(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -98,15 +94,15 @@ void AbsDiffNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::abs_diff(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
 
 void AbsDiffNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 {
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
index 1414193b..f3781eb3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
@@ -59,7 +59,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param prop The property to fit to
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param prop The property to fit to
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,8 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -182,6 +183,6 @@ public:
     }
 };
 
-void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
index 47c1a837..3876062f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
@@ -164,7 +164,8 @@ public:
          *
          * @param prop property to fit to get the parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -198,9 +199,9 @@ public:
         {
             return fmt::format(
                 "({} + {:.10e}*{}{:+15.10e})",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -217,9 +218,9 @@ public:
         {
             return fmt::format(
                 "\\left({} + {:.3}*{}{:+8.3e}\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
index 28f4814b..25940d1b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AddParamNode)
 
-void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<AddParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<AddParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
 
@@ -18,21 +18,21 @@ void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 AddParamNode::AddParamNode()
 {}
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop) :
+AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound) :
@@ -41,13 +41,9 @@ AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, doubl
     _params.resize(n_params(), 0.0);
 }
 
-void AddParamNode::get_parameters(std::vector<double>& prop)
+void AddParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void AddNode::set_value(const double* params, int offset, int depth)
@@ -55,8 +51,8 @@ void AddNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::add(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -68,8 +64,8 @@ void AddNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::add(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -79,7 +75,7 @@ void AddNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = 0.0;
     ub[1] = 0.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
index e82661af..02533c01 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
      * @param prop The property to fit to
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param prop The property to fit to
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,8 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -183,6 +184,6 @@ public:
     }
 };
 
-void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
index 28a538d1..6f695318 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
@@ -154,7 +154,8 @@ public:
          *
          * @param prop property to fit to get the parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(({:.10e}*{}{:+15.10e})^3)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\left({:.3e}{}{:+8.3e}\\right)^3\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
index 50080f44..1179f0f7 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CbParamNode)
 
-void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<CbParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<CbParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,23 +17,23 @@ void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& f
 CbParamNode::CbParamNode()
 {}
 
-CbParamNode::CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+CbParamNode::CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 
-    get_parameters(prop);
+    get_parameters(optimizer);
     set_value();
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-CbParamNode::CbParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+CbParamNode::CbParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 
-    get_parameters(prop);
+    get_parameters(optimizer);
     set_value();
 }
 
@@ -43,20 +43,16 @@ CbParamNode::CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_b
     _params.resize(n_params(), 0.0);
 }
 
-void CbParamNode::get_parameters(std::vector<double>& prop)
+void CbParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void CbNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::cb(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -67,7 +63,7 @@ void CbNode::set_value(const double* params, int offset, int depth)
 void CbNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::cb(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::cb(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -77,7 +73,7 @@ void CbNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
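
The params + 2 / depth + 1 indexing that runs through these hunks encodes the parameter layout the patch assumes everywhere: each node owns a scale params[0] and a shift params[1], its child's block starts two slots later, and recursion into child parameters is cut off once depth reaches nlopt_wrapper::MAX_PARAM_DEPTH. A minimal sketch of the convention (illustrative only; the calls are the same ones used in the hunks above):

    // Value of f(alpha * g(x) + beta): alpha = params[0], beta = params[1],
    // and g's own (alpha, beta, ...) block starts at params + 2.
    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH)
        ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) // recurse into the child's block
        : _feats[0]->value_ptr(offset + 2);                       // depth cap hit: plain child values
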
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
index 8c029ae5..2d56e25e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CbParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    CbParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
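
With the property vector gone from these signatures, a caller now builds one optimizer up front and threads the same shared_ptr through every generator call. A minimal sketch, assuming some factory on the wrapper side (make_optimizer is a hypothetical stand-in; only generateCbParamNode and its signature come from this patch):

    std::vector<node_ptr> feat_list;                         // output list of accepted features
    int feat_ind = 0;
    std::vector<double> prop = {/* property values, one per sample */};
    node_ptr feat = /* some existing primary feature */;
    // Hypothetical factory call; the real construction lives in NLOptWrapper.hpp.
    std::shared_ptr<NLOptimizer> optimizer = make_optimizer(prop);
    generateCbParamNode(feat_list, feat, feat_ind, 1e-50, 1e50, optimizer);
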
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
index a19a23d4..8d12cc06 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(cbrt({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\sqrt[3]{{ {:.3e}{}{:+8.3e} }}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
index 50a7436d..1c59e45e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CbrtParamNode)
 
-void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<CbrtParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<CbrtParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,23 +17,23 @@ void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 CbrtParamNode::CbrtParamNode()
 {}
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -43,17 +43,13 @@ CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double
     _params.resize(n_params(), 0.0);
 }
 
-void CbrtParamNode::get_parameters(std::vector<double>& prop)
+void CbrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d, true);
+    _sign_alpha = 1.0;
+    double min_res = optimizer->optimize_feature_params(this, true);
     std::vector<double> param_cp(_params);
-
     _sign_alpha = -1.0;
-    if(nlopt_wrapper::optimize_feature_params(d, true) > min_res)
+    if(optimizer->optimize_feature_params(this, true) > min_res)
     {
         std::copy_n(param_cp.data(), param_cp.size(), _params.data());
         _sign_alpha = 1.0;
@@ -64,7 +60,7 @@ void CbrtNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::cbrt(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -75,7 +71,7 @@ void CbrtNode::set_value(const double* params, int offset, int depth)
 void CbrtNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::cbrt(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::cbrt(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -85,7 +81,7 @@ void CbrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
-    _feats[0]->set_bounds(lb + 2, ub + 2);
+    _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
@@ -96,7 +92,7 @@ void CbrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
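
A note on why CbrtParamNode is the one node that optimizes twice: the magnitude of the cube root's scale is redundant with the linear prefactor applied downstream, since

    cbrt(alpha*x + beta) = cbrt(|alpha|) * cbrt(sgn(alpha)*x + beta/|alpha|)

so only the sign of alpha carries information. The bounds pin lb[0] = ub[0] = _sign_alpha, and get_parameters runs one constrained fit per sign, keeping whichever gives the lower residual.
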
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
index a8a24bba..424a292c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -71,7 +71,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CbrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    CbrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -189,6 +190,6 @@ public:
     }
 };
 
-void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
index 8f16af07..f2ed8157 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(cos({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\cos{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
index 5f3e8f34..a9e9e926 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CosParamNode)
 
-void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         return;
 
-    node_ptr new_feat = std::make_shared<CosParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<CosParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 CosParamNode::CosParamNode()
 {}
 
-CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     CosNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-CosParamNode::CosParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+CosParamNode::CosParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CosNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void CosParamNode::get_parameters(std::vector<double>& prop)
+void CosParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void CosNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::cos(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void CosNode::set_value(const double* params, int offset, int depth)
 void CosNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::cos(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::cos(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
index bac3a7b1..fb6ace4c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
@@ -61,7 +61,8 @@ public:
-     * @param param_list The list of parameters to optimize using non-linear least squares
-     * @param prop The property to fit to
+     * @param l_bound Minimum absolute value allowed for the feature.
+     * @param u_bound Maximum absolute value allowed for the feature.
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -71,7 +71,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CosParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    CosParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
index 9399d2d1..8f862c70 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
@@ -164,7 +164,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -198,9 +199,9 @@ public:
         {
             return fmt::format(
                 "({} / ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -217,9 +218,9 @@ public:
         {
             return fmt::format(
                 "\\left(\\frac{{ {} }}{{ {:.3e}*{}{:+8.3e} }} \\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
index 12b02bb2..7c81fd0c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
@@ -2,13 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(DivParamNode)
 
-void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
     if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
         return;
 
-    node_ptr new_feat = std::make_shared<DivParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<DivParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -20,26 +20,26 @@ void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 DivParamNode::DivParamNode()
 {}
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     DivNode(feat_1, feat_2, feat_ind)
 {
      if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     set_value();
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop) :
+DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     DivNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound) :
@@ -48,13 +48,9 @@ DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, doubl
     _params.resize(n_params(), 0.0);
 }
 
-void DivParamNode::get_parameters(std::vector<double>& prop)
+void DivParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d, rung() > 1);
+    optimizer->optimize_feature_params(this, rung() > 1);
 }
 
 void DivNode::set_value(const double* params, int offset, int depth)
@@ -62,8 +58,8 @@ void DivNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::div(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -75,8 +71,8 @@ void DivNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::div(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -86,7 +82,7 @@ void DivNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
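
For the two-feature nodes the packing order matters: the second feature's sub-block sits directly after the node's own pair, and the first feature's block follows it, which is why the hunks above use params + 2 for _feats[1] but params + _feats[1]->n_params() + 2 for _feats[0]. Laid out as a sketch (comment only, no new API):

    // Parameter block for f0 / (alpha * f1 + beta):
    // [ alpha | beta | f1's n_params() entries ... | f0's entries ... ]
    //   params[0]      params + 2                    params + 2 + _feats[1]->n_params()
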
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
index 4ba1fb4d..35f5a89a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -183,6 +184,6 @@ public:
     }
 };
 
-void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
index d6e5aa80..d00bf403 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(exp({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\exp{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
index 7c43d7db..659118db 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(ExpParamNode)
 
-void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         return;
 
-    node_ptr new_feat = std::make_shared<ExpParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<ExpParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 ExpParamNode::ExpParamNode()
 {}
 
-ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     ExpNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     ExpNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void ExpParamNode::get_parameters(std::vector<double>& prop)
+void ExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this, true);
 }
 
 void ExpNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::exp(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void ExpNode::set_value(const double* params, int offset, int depth)
 void ExpNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::exp(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::exp(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -83,7 +79,7 @@ void ExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(lb - from_parent) = 1.0;
     *(ub - from_parent) = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
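
The writes through lb - from_parent above read as degeneracy guards rather than ordinary bounds. Since

    a * exp(alpha*x + beta) = (a * e^beta) * exp(alpha*x)

the parent's scale and the exponential's own shift describe the same one-parameter family, so ExpNode::set_bounds pins the parent's scale slot to 1.0; LogNode (further below) pins the parent's shift slot to 0.0 for the mirror-image reason, ln(alpha*x + beta) = ln(alpha) + ln(x + beta/alpha) for alpha > 0. In slot terms:

    // lb points at this node's block; from_parent is the distance back to the parent's block.
    *(lb - from_parent) = 1.0;   // parent's scale, fixed: exp's own beta already plays that role
    *(ub - from_parent) = 1.0;
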
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
index 0beb38e9..6fae956f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    ExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    ExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
index cdcf4a10..642a360e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
@@ -150,7 +150,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -184,7 +185,7 @@ public:
             return fmt::format(
                 "(1.0 / ({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -202,7 +203,7 @@ public:
             return fmt::format(
                 "\\left(\\frac{{1}}{{ {:.3e}{}{:+8.3e} }}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
index a5da2982..9f923fc3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(InvParamNode)
 
-void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV))
         return;
 
-    node_ptr new_feat = std::make_shared<InvParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<InvParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 InvParamNode::InvParamNode()
 {}
 
-InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     InvNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-InvParamNode::InvParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+InvParamNode::InvParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     InvNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void InvParamNode::get_parameters(std::vector<double>& prop)
+void InvParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d, rung() > 1);
+    optimizer->optimize_feature_params(this, rung() > 1);
 }
 
 void InvNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::inv(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void InvNode::set_value(const double* params, int offset, int depth)
 void InvNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
     allowed_op_funcs::inv(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,7 +77,7 @@ void InvNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
index b8706b71..c926e57e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    InvParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    InvParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
index 061ebd56..0d515e89 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(ln({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\ln{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
index 0b5e462a..cfc6a5d9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(LogParamNode)
 
-void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
         return;
 
-    node_ptr new_feat = std::make_shared<LogParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<LogParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 LogParamNode::LogParamNode()
 {}
 
-LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     LogNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-LogParamNode::LogParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+LogParamNode::LogParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     LogNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void LogParamNode::get_parameters(std::vector<double>& prop)
+void LogParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void LogNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::log(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void LogNode::set_value(const double* params, int offset, int depth)
 void LogNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::log(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::log(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,7 +77,7 @@ void LogNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(lb - from_parent + 1) = 0.0;
     *(ub - from_parent + 1) = 0.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
index 53486f81..0d75c274 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    LogParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    LogParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
index 1b47135d..898aae56 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
@@ -165,7 +165,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -199,9 +200,9 @@ public:
         {
             return fmt::format(
                 "({} * ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -218,9 +219,9 @@ public:
         {
             return fmt::format(
                 "\\left({} * \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
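
For two-argument nodes the flat parameter block is packed with the second operand's parameters immediately after this node's own pair, which is why the two overloads above index into params asymmetrically. The layout, sketched:

    // Flat parameter block of a binary node such as MultNode:
    //   params[0], params[1]   -> this node's scale and shift
    //   params[2 .. 1+n1]      -> _feats[1]'s block, n1 = _feats[1]->n_params()
    //   params[2+n1 .. ]       -> _feats[0]'s block
    // hence _feats[1] reads at params + 2, _feats[0] at
    // params + _feats[1]->n_params() + 2, and set_bounds shifts the lb/ub
    // pointers by the same amounts.
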
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
index 51f52122..f83edc46 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(MultParamNode)
 
-void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<MultParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<MultParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,11 +17,11 @@ void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, no
 MultParamNode::MultParamNode()
 {}
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop):
+MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
@@ -33,20 +33,16 @@ MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, dou
     _params.resize(n_params(), 0.0);
 }
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop):
+MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
-void MultParamNode::get_parameters(std::vector<double>& prop)
+void MultParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void MultNode::set_value(const double* params, int offset, int depth)
@@ -54,8 +50,8 @@ void MultNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::mult(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -67,8 +63,8 @@ void MultNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::mult(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -78,7 +74,7 @@ void MultNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
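
Every generate*ParamNode helper applies the same acceptance test before the candidate reaches the feature list. The hunk tails are elided above, so the final push in this sketch is an assumption:

    // Keep the candidate only if it is finite, non-constant, and its largest
    // magnitude lies inside [l_bound, u_bound]; otherwise drop it.
    double max_abs = util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp());
    if(new_feat->is_nan() || new_feat->is_const() || (max_abs > u_bound) || (max_abs < l_bound))
        return;
    feat_list.push_back(new_feat);  // assumed: what the elided tail of the hunk does
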
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
index 55f5ecd1..a3bc169c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
index 132b4046..9f2b0d3f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The NLOptimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(exp(-1.0 * {:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\exp{{ \\left(-\\left({:.3e}{}{:+8.3e} \\right)\\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
index 7fa5cd2c..b58b87aa 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(NegExpParamNode)
 
-void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         return;
 
-    node_ptr new_feat = std::make_shared<NegExpParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<NegExpParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, in
 NegExpParamNode::NegExpParamNode()
 {}
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     NegExpNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         throw InvalidFeatureException();
 
     _params.resize(n_params(),  0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     NegExpNode(feat, feat_ind)
 {
     _params.resize(n_params(),  0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, do
     _params.resize(n_params(),  0.0);
 }
 
-void NegExpParamNode::get_parameters(std::vector<double>& prop)
+void NegExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this, true);
 }
 
 void NegExpNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::neg_exp(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void NegExpNode::set_value(const double* params, int offset, int depth)
 void NegExpNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::neg_exp(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::neg_exp(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -83,7 +79,7 @@ void NegExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(lb - from_parent) = 1.0;
     *(ub - from_parent) = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
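
The type checks in generateNegExpParamNode and in the bounded constructor skip operands that would make the parameterization degenerate: since the node already supplies a scale and a shift, exp(-(a*(x + d) + b)) = exp(-(a*x + (a*d + b))), so wrapping another exponential, an addition or subtraction, or a logarithm only reproduces features the two parameters can already express (and exp composed with log collapses to an affine form).
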
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
index 8e439688..920c1132 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    NegExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    NegExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
index c11f4af7..fb7564ca 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SinParamNode)
 
-void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         return;
 
-    node_ptr new_feat = std::make_shared<SinParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SinParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 SinParamNode::SinParamNode()
 {}
 
-SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SinNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SinParamNode::SinParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SinParamNode::SinParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SinNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void SinParamNode::get_parameters(std::vector<double>& prop)
+void SinParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SinNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sin(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void SinNode::set_value(const double* params, int offset, int depth)
 void SinNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sin(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sin(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,7 +77,7 @@ void SinNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = -1.0 * M_PI;
     ub[1] = M_PI;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
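
The phase bound in SinNode::set_bounds exploits periodicity: sin(a*x + p) = sin(a*x + p + 2*pi), so restricting params[1] to [-pi, pi] covers every distinct feature exactly once and keeps the optimizer away from an endless row of equivalent minima.
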
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
index aab0ed15..06df38e4 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SinParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SinParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
index 5ae01ef6..10977296 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The NLOptimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(sin({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\sin{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
index 2a194341..552f71ab 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SixPowParamNode)
 
-void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SixPowParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SixPowParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,21 +17,21 @@ void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, in
 SixPowParamNode::SixPowParamNode()
 {}
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -40,20 +40,16 @@ SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, do
     _params.resize(n_params(), 0.0);
 }
 
-void SixPowParamNode::get_parameters(std::vector<double>& prop)
+void SixPowParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SixPowNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sixth_pow(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -64,7 +60,7 @@ void SixPowNode::set_value(const double* params, int offset, int depth)
 void SixPowNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sixth_pow(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sixth_pow(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -74,7 +70,7 @@ void SixPowNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
index d7bd87d2..296f3950 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
@@ -61,7 +61,7 @@ public:
      * @param param_list The list of parameters to optimize using non-linear least squares
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -71,7 +71,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SixPowParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SixPowParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
index 8214b839..2802c95a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The NLOptimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -190,7 +191,7 @@ public:
             return fmt::format(
                 "(({:.10e}*{}{:+15.10e})^6)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -208,7 +209,7 @@ public:
             return fmt::format(
                 "\\left(\\left({:.3e}{}{:+8.3e}\\right)^6\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
index b5237be2..cb073234 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SqParamNode)
 
-void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SqParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SqParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,21 +17,21 @@ void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& f
 SqParamNode::SqParamNode()
 {}
 
-SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SqParamNode::SqParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SqParamNode::SqParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -40,21 +40,16 @@ SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_b
     _params.resize(n_params(), 0.0);
 }
 
-void SqParamNode::get_parameters(std::vector<double>& prop)
+void SqParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-    double minf;
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SqNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sq(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -65,7 +60,7 @@ void SqNode::set_value(const double* params, int offset, int depth)
 void SqNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sq(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sq(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -75,7 +70,7 @@ void SqNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
index a387ecba..ce3f7a2e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SqParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SqParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
index 86c4e419..9e311f89 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The NLOptimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(({:.10e}*{}{:+15.10e})^2)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\left({:.3e}{}{:+8.3e}\\right)^2\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
index dada6dd3..9120db2a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SqrtParamNode)
 
-void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SqrtParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SqrtParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,23 +17,23 @@ void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 SqrtParamNode::SqrtParamNode()
 {}
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -43,17 +43,13 @@ SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double
     _params.resize(n_params(), 0.0);
 }
 
-void SqrtParamNode::get_parameters(std::vector<double>& prop)
+void SqrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    double min_res = optimizer->optimize_feature_params(this);
     std::vector<double> param_cp(_params);
 
     _sign_alpha = -1.0;
-    if(nlopt_wrapper::optimize_feature_params(d) > min_res)
+    if(optimizer->optimize_feature_params(this) > min_res)
     {
         std::copy_n(param_cp.data(), param_cp.size(), _params.data());
         _sign_alpha = 1.0;
@@ -64,7 +60,7 @@ void SqrtNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sqrt(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -75,7 +71,7 @@ void SqrtNode::set_value(const double* params, int offset, int depth)
 void SqrtNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sqrt(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sqrt(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -85,7 +81,7 @@ void SqrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
@@ -96,7 +92,7 @@ void SqrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
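
SqrtParamNode::get_parameters is the one override that does more than delegate. Because SqrtParamNode::set_bounds pins params[0] to _sign_alpha, the sign of the leading scale cannot be searched continuously, so the optimizer runs once per sign and the worse result is rolled back. Restated as a sketch:

    double min_res = optimizer->optimize_feature_params(this);  // pass 1: scale pinned to +1
    std::vector<double> param_cp(_params);                      // save the +1 solution

    _sign_alpha = -1.0;                                         // pass 2: scale pinned to -1
    if(optimizer->optimize_feature_params(this) > min_res)
    {
        // the -1 branch fit worse: restore the saved parameters and sign
        std::copy_n(param_cp.data(), param_cp.size(), _params.data());
        _sign_alpha = 1.0;
    }
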
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
index ef693329..173b09e9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
@@ -62,7 +62,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    SqrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SqrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The NLOptimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -190,6 +191,6 @@ public:
     }
 };
 
-void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
index 6d347f92..aea3fd13 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The NLOptimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(sqrt({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\sqrt{{ {:.3e}{}{:+8.3e} }}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
index 334eff51..6ba6d443 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SubParamNode)
 
-void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SubParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SubParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,21 +17,21 @@ void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 SubParamNode::SubParamNode()
 {}
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop) :
+SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound) :
@@ -40,13 +40,9 @@ SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, doubl
     _params.resize(n_params(), 0.0);
 }
 
-void SubParamNode::get_parameters(std::vector<double>& prop)
+void SubParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SubNode::set_value(const double* params, int offset, int depth)
@@ -54,8 +50,8 @@ void SubNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::sub(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -67,8 +63,8 @@ void SubNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::sub(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -78,7 +74,7 @@ void SubNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = 0.0;
     ub[1] = 0.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
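
The equal lower and upper bounds written by these set_bounds overrides pin a redundant degree of freedom instead of shrinking the parameter block: in f0 - (a*f1 + b) the shift b only translates the feature, which a downstream regression intercept absorbs, so it is fixed at 0; in the multiplicative nodes the leading scale is absorbed by the regression coefficient and is fixed at 1. The idiom, sketched:

    // lb == ub freezes the parameter at that value while keeping the flat
    // parameter layout (and all of the +2 offsets) intact.
    lb[1] = 0.0;
    ub[1] = 0.0;
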
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
index c4144e08..5038b556 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer optimizer used to find the optimal parameters of the new feature
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer optimizer used to find the optimal parameters of the new feature
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,8 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -183,6 +184,6 @@ public:
     }
 };
 
-void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
index 5326d904..50191586 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
@@ -165,7 +165,8 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -199,9 +200,9 @@ public:
         {
             return fmt::format(
                 "({} - ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -218,9 +219,9 @@ public:
         {
             return fmt::format(
                 "\\left({} - \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_ops.hpp b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
index dc079048..740f9939 100644
--- a/src/feature_creation/node/operator_nodes/allowed_ops.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
@@ -32,8 +32,8 @@ typedef std::function<void(std::vector<node_ptr>&, node_ptr, int&, double, doubl
 typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, int&, double, double)> bin_op_node_gen;
 
 #ifdef PARAMETERIZE
-    typedef std::function<void(std::vector<node_ptr>&, node_ptr, int&, double, double, std::vector<double>&)> un_param_op_node_gen;
-    typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, int&, double, double, std::vector<double>&)> bin_param_op_node_gen;
+    typedef std::function<void(std::vector<node_ptr>&, node_ptr, int&, double, double, std::shared_ptr<NLOptimizer>)> un_param_op_node_gen;
+    typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, int&, double, double, std::shared_ptr<NLOptimizer>)> bin_param_op_node_gen;
 #endif
 
 namespace allowed_op_maps
diff --git a/src/nl_opt/NLOptWrapper.cpp b/src/nl_opt/NLOptWrapper.cpp
index c88ae120..fb68beae 100644
--- a/src/nl_opt/NLOptWrapper.cpp
+++ b/src/nl_opt/NLOptWrapper.cpp
@@ -1,78 +1,62 @@
 #include <nl_opt/NLOptWrapper.hpp>
-
-double (*nlopt_wrapper::_objective)(unsigned int n, const double* p, double* grad, void* data) = nlopt_wrapper::objective_reg;
-
-std::vector<double> nlopt_wrapper::_feature_gradient;
-std::vector<double> nlopt_wrapper::_zeros;
-std::vector<int> nlopt_wrapper::_task_sizes;
-std::vector<double> nlopt_wrapper::_residuals;
-std::vector<double> nlopt_wrapper::_work;
-std::vector<double> nlopt_wrapper::_a_copy;
-std::vector<double> nlopt_wrapper::_prop_copy;
-
-int nlopt_wrapper::_max_param_depth;
-
-nlopt::algorithm nlopt_wrapper::_local_opt_alg = nlopt::LD_VAR2;
-double nlopt_wrapper::_cauchy_scaling = 0.5 * 0.5;
-int nlopt_wrapper::_n_samp = 0;
-
-ConvexHull1D nlopt_wrapper::_convex_hull;
-
-void nlopt_wrapper::setup_data(std::vector<int> task_sizes, int max_dim, int n_rung, int max_param_depth)
+int nlopt_wrapper::MAX_PARAM_DEPTH = -1;
+NLOptimizer::NLOptimizer(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, nlopt::func objective, int max_param_depth, bool reset_max_param_depth) :
+    _objective(objective),
+    _a(*std::max_element(task_sizes.begin(), task_sizes.end()) * 2),
+    _prop(prop),
+    _prop_copy(prop),
+    _work(prop.size(), 0.0),
+    _zeros(prop.size(), 0.0),
+    _task_sizes(task_sizes),
+    _n_samp(std::accumulate(task_sizes.begin(), task_sizes.end(), 0)),
+    _n_rung(n_rung),
+    _max_params(2 * task_sizes.size()),
+    _max_param_depth(std::min(n_rung, max_param_depth)),
+    _local_opt_alg(nlopt::LD_VAR2)
 {
-    if(max_param_depth == -1)
+    if(prop.size() != _n_samp)
+        throw std::logic_error("Property vector size (" + std::to_string(_prop.size()) + ") and number of samples (" + std::to_string(_n_samp) + ") are not the same");
+
+    if(_max_param_depth == -1)
         _max_param_depth = n_rung;
-    else
-        _max_param_depth = std::min(n_rung, max_param_depth);
-    _task_sizes = task_sizes;
-    _zeros.resize(50, 0.0);
-    _n_samp = std::accumulate(task_sizes.begin(), task_sizes.end(), 0.0);
 
-    int max_params = 2 * task_sizes.size();
-    for(int rr = 1; rr <= n_rung; ++rr)
-        max_params += std::pow(2, rr);
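+    // All optimizers in a run must agree on the parameterization depth, since the
+    // operator nodes read it from the global nlopt_wrapper::MAX_PARAM_DEPTH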
+    if(reset_max_param_depth || (nlopt_wrapper::MAX_PARAM_DEPTH == -1))
+        nlopt_wrapper::MAX_PARAM_DEPTH = _max_param_depth;
+    else if(nlopt_wrapper::MAX_PARAM_DEPTH != _max_param_depth)
+        throw std::logic_error("_max_param_depth (" + std::to_string(_max_param_depth) + ") is not the same as the global one (" + std::to_string(nlopt_wrapper::MAX_PARAM_DEPTH) + ").");
 
-    #pragma omp parallel
-    {
-        _work.resize(_n_samp * max_dim, 0.0);
-        _a_copy.resize(_n_samp * 2, 0.0);
-        _residuals.resize(_n_samp, 0.0);
-        _feature_gradient.resize(max_params * _n_samp, 0.0);
-        _prop_copy.resize(_n_samp, 0.0);
-    }
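+    // Each rung of a binary expression tree can at most double the number of
+    // parameterized nodes, so rung rr contributes up to 2^rr additional parameters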
+    for(int rr = 1; rr <= _max_param_depth; ++rr)
+        _max_params += std::pow(2, rr);
 }
 
-void nlopt_wrapper::set_objective(std::string calc_type, double* prop, const std::vector<int> sizes, int n_rung, int max_param_depth)
+NLOptimizerClassification::NLOptimizerClassification(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, bool reset_max_param_depth) :
+    NLOptimizer(task_sizes, prop, n_rung, nlopt_wrapper::objective_class, max_param_depth, reset_max_param_depth)
 {
-    if(calc_type.compare("classification") == 0)
-    {
-        #pragma omp parallel
-        _convex_hull.initialize_prop(sizes, prop);
-        _objective = objective_class;
-        _local_opt_alg = nlopt::LN_SBPLX;
-        setup_data(sizes, 1, 0, max_param_depth);
-    }
-    else if(calc_type.compare("regression") == 0)
-    {
-        _objective = objective_reg;
-        setup_data(sizes, 1, n_rung, max_param_depth);
-    }
-    else if(calc_type.compare("log_regression") == 0)
-    {
-        _objective = objective_log_reg;
-        setup_data(sizes, 1, n_rung, max_param_depth);
-    }
-    else
-    {
-        throw std::logic_error("projection type can not determined");
-    }
+    _convex_hull = std::make_shared<ConvexHull1D>(task_sizes, _prop.data());
+    _local_opt_alg = nlopt::LN_SBPLX;
 }
 
-double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
+NLOptimizerRegression::NLOptimizerRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, double cauchy_scaling, bool log_reg, bool reset_max_param_depth) :
+    NLOptimizer(task_sizes, prop, n_rung, log_reg ? nlopt_wrapper::objective_log_reg : nlopt_wrapper::objective_reg, max_param_depth, reset_max_param_depth),
+    _feature_gradient(_max_params * _n_samp, 0.0),
+    _residuals(_n_samp, 0.0),
+    _cauchy_scaling(cauchy_scaling * cauchy_scaling)
+{}
+
+NLOptimizerLogRegression::NLOptimizerLogRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, double cauchy_scaling, bool reset_max_param_depth) :
+    NLOptimizerRegression(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, true, reset_max_param_depth)
+{}
+
+double NLOptimizer::optimize_feature_params(Node* feat, bool use_simplex)
 {
+    nlopt_wrapper::feat_data data;
+    data._feat = feat;
+    data._prop = _prop.data();
+    data._optimizer = this;
+
     double minf = 0.0;
-    std::vector<double> params(data._feat->parameters().size() + 2 * _task_sizes.size(), 1.0);
-    std::vector<double> params_final(data._feat->parameters().size(), 1.0);
+    std::vector<double> params(feat->parameters().size() + 2 * _task_sizes.size(), 1.0);
+    std::vector<double> params_final(feat->parameters().size(), 1.0);
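+    // The dcopy_ calls below zero every second entry (stride 2), i.e. the shift
+    // parameters, while the scale parameters keep their initial value of 1.0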
 
     dcopy_(params.size() / 2, _zeros.data(), 1, &params[1], 2);
     dcopy_(params_final.size() / 2, _zeros.data(), 1, &params_final[1], 2);
@@ -80,15 +64,16 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
     std::vector<double> lb_global(params.size(), -1e2);
     std::vector<double> ub_global(params.size(),  1e2);
 
-    data._feat->set_bounds(lb_global.data() + 2 * _task_sizes.size(), ub_global.data() + 2 * _task_sizes.size());
+    feat->set_bounds(lb_global.data() + 2 * _task_sizes.size(), ub_global.data() + 2 * _task_sizes.size());
 
     bool scale_b = (lb_global[_task_sizes.size() * 2 - 2] != ub_global[_task_sizes.size() * 2 - 2]);
     bool scale_c = (lb_global[_task_sizes.size() * 2 - 1] != ub_global[_task_sizes.size() * 2 - 1]);
     int n_dim = scale_b + scale_c;
     int start = 0;
 
-    double* val_ptr = data._feat->value_ptr(params_final.data());
-    std::copy_n(data._prop, _n_samp, _prop_copy.data());
+    double* val_ptr = feat->value_ptr(params_final.data());
+    std::copy_n(_prop.data(), _n_samp, _prop_copy.data());
+
     for(int tt = 0; tt < _task_sizes.size(); ++tt)
     {
         lb_global[tt * 2] = lb_global[_task_sizes.size() * 2 - 2];
@@ -97,12 +82,12 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
         ub_global[tt * 2] = ub_global[_task_sizes.size() * 2 - 2];
         ub_global[tt * 2 + 1] = ub_global[_task_sizes.size() * 2 - 1];
 
-        std::fill_n(_a_copy.data(), _a_copy.size(), 1.0);
+        std::fill_n(_a.data(), _a.size(), 1.0);
         if(scale_b)
-            std::copy_n(val_ptr + start, _task_sizes[tt], _a_copy.data());
+            std::copy_n(val_ptr + start, _task_sizes[tt], _a.data());
 
         int info = 0;
-        dgels_('N', _task_sizes[tt], n_dim, 1, _a_copy.data(), _task_sizes[tt], &_prop_copy[start], _task_sizes[tt], _work.data(), _work.size(), &info);
+        dgels_('N', _task_sizes[tt], n_dim, 1, _a.data(), _task_sizes[tt], &_prop_copy[start], _task_sizes[tt], _work.data(), _work.size(), &info);
         if(info == 0)
         {
             params[tt * 2] = scale_b * _prop_copy[start] + (!scale_b);
@@ -110,15 +95,29 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
         }
         start += _task_sizes[tt];
     }
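+    // Clamp the least-squares initial guess into the global bounds so the
+    // global and local optimizers start from a feasible point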
+    std::transform(
+        lb_global.begin(),
+        lb_global.end(),
+        params.begin(),
+        params.begin(),
+        [](double lb, double p){return p < lb ? lb : p;}
+    );
+
+    std::transform(
+        ub_global.begin(),
+        ub_global.end(),
+        params.begin(),
+        params.begin(),
+        [](double ub, double p){return p > ub ? ub : p;}
+    );
 
     nlopt::opt opt_global(nlopt::GN_ISRES, params.size());
-    opt_global.set_min_objective(nlopt_wrapper::_objective, &data);
+    opt_global.set_min_objective(_objective, &data);
     opt_global.set_maxeval(2500);
     opt_global.set_xtol_rel(1e-2);
     opt_global.set_lower_bounds(lb_global);
     opt_global.set_upper_bounds(ub_global);
 
-
     nlopt::opt opt_local((use_simplex ? nlopt::LN_SBPLX : _local_opt_alg), params.size());
     opt_local.set_min_objective(_objective, &data);
     opt_local.set_maxeval(2500);
@@ -145,6 +144,211 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
     {
         minf = HUGE_VAL;
     }
-    data._feat->set_parameters(params_final);
+    feat->set_parameters(params_final);
     return !std::isnan(minf) ? minf : std::numeric_limits<double>::infinity();
 }
+
+double nlopt_wrapper::objective_class(unsigned int n, const double* p, double* grad, void* data)
+{
+    feat_data* d = (feat_data*) data;
+    return d->_optimizer->convex_hull()->overlap_1d(d->_feat->value_ptr(p));
+}
+
+double nlopt_wrapper::objective_reg(unsigned int n, const double* p, double* grad, void* data)
+{
+    feat_data* d = (feat_data*) data;
+    double* val_ptr = d->_feat->value_ptr(p + 2 * d->_optimizer->task_sizes().size());
+
+    int start = 0;
+    std::fill_n(d->_optimizer->feature_gradient(0), n * d->_optimizer->n_samp(), 0.0);
+    for(int tt = 0; tt < d->_optimizer->task_sizes().size(); ++tt)
+    {
+        // Calculate the residual
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->residuals(start),
+            [p, tt, d](double vp, double prop){
+                return d->_optimizer->cauchy_scaling() * std::log(1 + std::pow(prop - (vp * p[2*tt] + p[2*tt + 1]), 2.0) / d->_optimizer->cauchy_scaling()) / d->_optimizer->n_samp();
+            }
+        );
+
+        // Calculate the base of the gradient for each step:
+
+        // Contribution to the derivative from (p - (\alpha_task * feat_val + a_task) )^2
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [p, tt](double vp, double prop){
+                return prop - (vp * p[2*tt] + p[2*tt + 1]);
+            }
+        );
+
+        // Contribution from log(1 + s^2/a^2)
+        std::transform(
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [d](double s){
+                return -2.0 / (1.0 + std::pow(s, 2.0) / d->_optimizer->cauchy_scaling()) * s;
+            }
+        );
+
+        // \partial s_i/\partial \alpha_task = \partial s_i/\partial a_task * f_i
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient((2 * tt) * d->_optimizer->n_samp() + start),
+            [](double vp, double s){
+                return vp * s;
+            }
+        );
+        // Calculate the gradients of the individual feature parameters
+        // First calculate the contribution from \partial s / \partial p
+        for(int pp = 0; pp < d->_feat->n_params() / 2; ++pp)
+        {
+            // \partial s_i / \partial(\alpha_f or a_f) = \partial s_i/\partial a_task * \alpha_task
+            std::transform(
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+                d->_optimizer->feature_gradient(2 * (d->_optimizer->task_sizes().size() + pp) * d->_optimizer->n_samp() + start),
+                [p, tt](double s){return p[2 * tt] * s;}
+            );
+            std::copy_n(
+                d->_optimizer->feature_gradient(2 * (pp + d->_optimizer->task_sizes().size()) * d->_optimizer->n_samp() + start),
+                d->_optimizer->task_sizes()[tt],
+                d->_optimizer->feature_gradient((2 * (pp + d->_optimizer->task_sizes().size()) + 1) * d->_optimizer->n_samp() + start)
+            );
+        }
+        start += d->_optimizer->task_sizes()[tt];
+    }
+    // Add the component from the feature gradient
+    d->_feat->gradient(
+        d->_optimizer->feature_gradient(d->_optimizer->n_samp() * d->_optimizer->task_sizes().size() * 2),
+        d->_optimizer->work(),
+        p + 2 * d->_optimizer->task_sizes().size()
+    );
+
+    if(grad)
+    {
+        // Total the individual residual derivatives
+        for(int pp = 0; pp < 2 * d->_optimizer->task_sizes().size() + d->_feat->n_params(); ++pp)
+            grad[pp] = 1.0 / d->_optimizer->n_samp() * std::accumulate(
+                d->_optimizer->feature_gradient(pp * d->_optimizer->n_samp()),
+                d->_optimizer->feature_gradient((pp + 1) * d->_optimizer->n_samp()),
+                0.0
+            );
+    }
+    return std::accumulate(d->_optimizer->residuals(0), d->_optimizer->residuals(d->_optimizer->n_samp()), 0.0);
+}
+
+double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double* grad, void* data)
+{
+    feat_data* d = (feat_data*) data;
+    double* val_ptr = d->_feat->value_ptr(p + 2 * d->_optimizer->task_sizes().size());
+
+    std::fill_n(d->_optimizer->feature_gradient(0), n * d->_optimizer->n_samp(), 0.0);
+    int start = 0;
+    for(int tt = 0; tt < d->_optimizer->task_sizes().size(); ++tt)
+    {
+        // Calculate the residual
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->residuals(start),
+            [p, tt, d](double vp, double prop){
+                return d->_optimizer->cauchy_scaling() * std::log(1 + std::pow(prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]), 2.0) / d->_optimizer->cauchy_scaling());
+            }
+        );
+
+        // Calculate the base of the gradient for each step
+
+        // Contribution to the derivative from (p - (\alpha_task * log(feat_val) + a_task) )^2
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [p, tt](double vp, double prop){
+                return prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]);
+            }
+        );
+
+        // Contribution from log(1 + s^2/a^2)
+        std::transform(
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [d](double s){
+                return -2.0 / (1.0 + std::pow(s / d->_optimizer->cauchy_scaling(), 2.0)) * s;
+            }
+        );
+
+        // \partial s_i/\partial \alpha_task = \partial s_i/\partial a_task * log(f_i)
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient(2 * tt * d->_optimizer->n_samp() + start),
+            [](double vp, double s){
+                return std::log(vp) * s;
+            }
+        );
+
+        for(int pp = 0; pp < d->_feat->n_params(); ++pp)
+        {
+            // \partial s_i / \partial(\alpha_f or a_f) = \partial s_i/\partial a_task * \alpha_task / feat_i
+            std::transform(
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+                val_ptr + start,
+                d->_optimizer->feature_gradient(2 * (pp + d->_optimizer->task_sizes().size()) * d->_optimizer->n_samp() + start),
+                [p, tt](double s, double vp){return p[2 * tt] * s / vp;}
+            );
+            std::copy_n(
+                d->_optimizer->feature_gradient(2 * (pp + d->_optimizer->task_sizes().size()) * d->_optimizer->n_samp() + start),
+                d->_optimizer->task_sizes()[tt],
+                d->_optimizer->feature_gradient((2 * (pp + d->_optimizer->task_sizes().size()) + 1) * d->_optimizer->n_samp() + start)
+            );
+        }
+        start += d->_optimizer->task_sizes()[tt];
+    }
+
+    // Add the component from the feature gradient
+    d->_feat->gradient(
+        d->_optimizer->feature_gradient(d->_optimizer->n_samp() * d->_optimizer->task_sizes().size() * 2),
+        d->_optimizer->work(),
+        p + 2 * d->_optimizer->task_sizes().size()
+    );
+
+    if(grad)
+    {
+        for(int pp = 0; pp < 2 * d->_optimizer->task_sizes().size() + d->_feat->n_params(); ++pp)
+            grad[pp] = 1.0 / d->_optimizer->n_samp() * std::accumulate(
+                d->_optimizer->feature_gradient(pp * d->_optimizer->n_samp()),
+                d->_optimizer->feature_gradient((pp + 1) * d->_optimizer->n_samp()),
+                0.0
+            );
+    }
+
+    return std::accumulate(d->_optimizer->residuals(0), d->_optimizer->residuals(d->_optimizer->n_samp()), 0.0);
+}
+
+std::shared_ptr<NLOptimizer> nlopt_wrapper::get_optimizer(std::string project_type, const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, double cauchy_scaling, bool reset_max_param_depth)
+{
+    if(project_type.compare("classification") == 0)
+        return std::make_shared<NLOptimizerClassification>(task_sizes, prop, n_rung, max_param_depth, reset_max_param_depth);
+    else if(project_type.compare("regression") == 0)
+        return std::make_shared<NLOptimizerRegression>(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, reset_max_param_depth);
+    else if(project_type.compare("log_regression") == 0)
+        return std::make_shared<NLOptimizerLogRegression>(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, reset_max_param_depth);
+    else
+        throw std::logic_error("Invalid project type (" + project_type + ") was passed to get_optimizer.");
+
+    return nullptr;
+}
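+
+// Illustrative usage sketch (comment only, not part of the API; assumes `feat`
+// is a Node* taken from an existing FeatureSpace):
+//
+//     std::vector<int> task_sizes = {50, 50};
+//     std::vector<double> prop(100, 0.0);
+//     std::shared_ptr<NLOptimizer> opt =
+//         nlopt_wrapper::get_optimizer("regression", task_sizes, prop, 2);
+//     double min_obj = opt->optimize_feature_params(feat);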
diff --git a/src/nl_opt/NLOptWrapper.hpp b/src/nl_opt/NLOptWrapper.hpp
index 300c9616..75b2cf35 100644
--- a/src/nl_opt/NLOptWrapper.hpp
+++ b/src/nl_opt/NLOptWrapper.hpp
@@ -11,288 +11,462 @@
 #include <feature_creation/node/Node.hpp>
 #include <classification/ConvexHull1D.hpp>
 
-namespace nlopt_wrapper
+class NLOptimizer
 {
-    extern double (*_objective)(unsigned int n, const double* p, double* grad, void* data); //!< Objective function to use
+protected:
+    nlopt::func _objective; //!< Objective function to use
+
+    std::vector<double> _a; //!< vector to store the A matrix for dgels
+    std::vector<double> _prop; //!< The property to fit the functions against
+    std::vector<double> _prop_copy; //!< Copy of the property to keep for dgels
+    std::vector<double> _work; //!< work array for dgels
+    std::vector<double> _zeros; //!< array of zeros to fill parameters
+    std::vector<int> _task_sizes; //!< number of samples in each task
+
+    int _n_samp; //!< total number of samples
+    int _n_rung; //!< Maximum rung of the features
+    int _max_params; //!< Maximum number of possible parameters
+    int _max_param_depth; //!< parameterize features to all depths of the tree
+
+    nlopt::algorithm _local_opt_alg; //!< Algorithm used for local optimization
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param objective the objective function to minimize
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param reset_max_param_depth If true, reset the global nlopt_wrapper::MAX_PARAM_DEPTH to this optimizer's depth
+     */
+    NLOptimizer(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, nlopt::func objective, int max_param_depth=-1, bool reset_max_param_depth=false);
 
-    extern std::vector<double> _feature_gradient; //!< vector used to calculate the contribution of feature derivatives to the gradient
-    extern std::vector<double> _zeros; //!< array of zeros to fill parameters
-    extern std::vector<double> _residuals; //!< storage space for the residuals
-    extern std::vector<double> _work; //!< work array for dgels
-    extern std::vector<double> _a_copy; //!< Copy of the initial a vector for least squares problems
-    extern std::vector<double> _prop_copy; //!< Copy of the property vector
-    extern std::vector<int> _task_sizes; //!< tasks sizes
-    extern double _cauchy_scaling; //!< Scaling factor for calculating the cauchy loss function
-    extern int _n_samp; //!< total number of samples
-    extern nlopt::algorithm _local_opt_alg; //!< Algorithm used for local optimization
-    extern int _max_param_depth; //!< parameterize features to all depths of the tree
+    // DocString: nloptimizer_optimize_feature_params
+    /**
+     * @brief uses nlopt to optimize the parameters of a feature
+     *
+     * @param feat pointer to the feature whose parameters are optimized
+     * @param use_simplex If true use a Nelder-Mead type optimizer (LN_SBPLX) for local optimization,
+     *                    otherwise use a gradient-descent based approach if possible
+     */
+    double optimize_feature_params(Node* feat, bool use_simplex=false);
+
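+    // Accessors used by the objective functions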
+    inline std::vector<int>& task_sizes(){return _task_sizes;}
+    inline std::vector<double>& prop(){return _prop;}
+    inline double* work(){return _work.data();}
+    inline int n_samp(){return _n_samp;}
+    inline int n_rung(){return _n_rung;}
+    inline int max_params(){return _max_params;}
+    inline int max_param_depth(){return _max_param_depth;}
+    inline nlopt::algorithm local_opt_alg(){return _local_opt_alg;}
+
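+    // Subclass-specific data: classification provides the convex hull; the
+    // regression optimizers provide the gradient and residual work arrays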
+    virtual std::shared_ptr<ConvexHull1D> convex_hull() = 0;
+    virtual double* feature_gradient(int ind) = 0;
+    virtual double* residuals(int ind) = 0;
+    virtual double cauchy_scaling() = 0;
+};
+
+class NLOptimizerClassification: public NLOptimizer
+{
+protected:
+    std::shared_ptr<ConvexHull1D> _convex_hull; //!< Object to perform classification
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     */
+    NLOptimizerClassification(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, bool reset_max_param_depth=false);
 
-    extern ConvexHull1D _convex_hull; //!< Object to perform classification
+    inline std::shared_ptr<ConvexHull1D> convex_hull(){return _convex_hull;}
+    inline double* feature_gradient(int ind){return nullptr;}
+    inline double* residuals(int ind){return nullptr;}
+    inline double cauchy_scaling(){return 0.0;}
+};
 
-    #pragma omp threadprivate(_work, _a_copy, _residuals, _feature_gradient, _prop_copy, _convex_hull)
+class NLOptimizerRegression: public NLOptimizer
+{
+protected:
+    std::vector<double> _feature_gradient; //!< vector used to calculate the contribution of feature derivatives to the gradient
+    std::vector<double> _residuals; //!< storage space for the residuals
+    double _cauchy_scaling; //!< Scaling factor for calculating the cauchy loss function
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param cauchy_scaling scaling factor used for the Cauchy loss function
+     * @param log_reg If true, use the log regression objective function
+     */
+    NLOptimizerRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5, bool log_reg=false, bool reset_max_param_depth=false);
 
-    typedef struct
-    {
-        double* _prop; //!< pointer to the property vector
-        double* _a; //!< Node pointer of the feature to parameterize
-        int _n_feat; //!< Node pointer of the feature to parameterize
-    } l0_data;
+    inline std::shared_ptr<ConvexHull1D> convex_hull(){return nullptr;}
+    inline double* feature_gradient(int ind){return &_feature_gradient[ind];}
+    inline double* residuals(int ind){return &_residuals[ind];}
+    inline double cauchy_scaling(){return _cauchy_scaling;}
+};
+
+class NLOptimizerLogRegression: public NLOptimizerRegression
+{
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param cauchy_scaling scaling factor used for the Cauchy loss function
+     */
+    NLOptimizerLogRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5, bool reset_max_param_depth=false);
+};
+
+namespace nlopt_wrapper
+{
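+    //! Global copy of the maximum parameterization depth; set by the first optimizer constructed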
+    extern int MAX_PARAM_DEPTH;
 
     typedef struct
     {
         double* _prop; //!< pointer to the property vector
         Node* _feat; //!< Node pointer of the feature to parameterize
+        NLOptimizer* _optimizer; //!< Pointer to the optimizer whose work arrays the objective functions use
     } feat_data;
 
 
-    static double objective_class(unsigned int n, const double* p, double* grad, void* data)
-    {
-        feat_data* d = (feat_data*) data;
-        return _convex_hull.overlap_1d(d->_feat->value_ptr(p));
-    }
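+    /**
+     * @brief NLopt objective for classification: the 1D convex-hull overlap of the feature values
+     */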
+    static double objective_class(unsigned int n, const double* p, double* grad, void* data);
 
-    static double objective_reg(unsigned int n, const double* p, double* grad, void* data)
-    {
-        feat_data* d = (feat_data*) data;
-        double* val_ptr = d->_feat->value_ptr(p + 2 * _task_sizes.size());
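+    /**
+     * @brief NLopt objective for regression: Cauchy loss of the linear model residuals with analytic gradients
+     */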
+    static double objective_reg(unsigned int n, const double* p, double* grad, void* data);
 
-        int start = 0;
-        std::fill_n(_feature_gradient.data(), _feature_gradient.size(), 0.0);
-        for(int tt = 0; tt < _task_sizes.size(); ++tt)
-        {
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_residuals[start],
-                [p, tt](double vp, double prop){
-                    return _cauchy_scaling * std::log(1 + std::pow(prop - (vp * p[2*tt] + p[2*tt + 1]), 2.0) / _cauchy_scaling) / _n_samp;
-                }
-            );
-
-            // Calculate the base of the gradient for each step
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [p, tt](double vp, double prop){
-                    return prop - (vp * p[2*tt] + p[2*tt + 1]);
-                }
-            );
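+    /**
+     * @brief NLopt objective for log regression: Cauchy loss on a log-transformed linear model
+     */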
+    static double objective_log_reg(unsigned int n, const double* p, double* grad, void* data);
 
-            std::transform(
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start +_task_sizes[tt]],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [](double s){
-                    return -2.0 / (1.0 + std::pow(s, 2.0) / _cauchy_scaling) * s;
-                }
-            );
+    /**
+     * @brief Get an optimizer for the desired task
+     *
+     * @param project_type The type of projection operator to optimize the features for
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param cauchy_scaling scaling factor used for the Cauchy loss function
+     *
+     * @return The correct optimizer
+     */
+    std::shared_ptr<NLOptimizer> get_optimizer(std::string project_type, const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5, bool reset_max_param_depth=false);
 
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                &_feature_gradient[(2 * tt) * _n_samp + start],
-                [](double vp, double s){
-                    return vp * s;
-                }
+    #ifdef PY_BINDINGS
+        // DocString: nlopt_wrapper_get_reg_optimizer_list_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
             );
-            for(int pp = 0; pp < d->_feat->n_params() / 2; ++pp)
-            {
-                std::transform(
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start + _task_sizes[tt]],
-                    &_feature_gradient[2 * (_task_sizes.size() + pp) * _n_samp + start],
-                    [p, tt](double s){return p[2 * tt] * s;}
-                );
-                std::copy_n(
-                    &_feature_gradient[2 * (pp + _task_sizes.size()) * _n_samp + start],
-                    _task_sizes[tt],
-                    &_feature_gradient[(2 * (pp + _task_sizes.size()) + 1) * _n_samp + start]
-                );
-            }
-            start += _task_sizes[tt];
         }
-        d->_feat->gradient(&_feature_gradient.data()[_n_samp * _task_sizes.size() * 2], _work.data(), p + 2 * _task_sizes.size());
 
-        if(grad)
+        // DocString: nlopt_wrapper_get_reg_optimizer_list_arr
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
         {
-            for(int pp = 0; pp < 2 * _task_sizes.size() + d->_feat->n_params(); ++ pp)
-                grad[pp] = std::accumulate(&_feature_gradient[pp * _n_samp], &_feature_gradient[(pp + 1) * _n_samp], 0.0) / _n_samp;
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
+            );
         }
 
-        return std::accumulate(_residuals.begin(), _residuals.end(), 0.0);
-    }
-
-    static double objective_log_reg(unsigned int n, const double* p, double* grad, void* data)
-    {
-        feat_data* d = (feat_data*) data;
-        double* val_ptr = d->_feat->value_ptr(p + 2 * _task_sizes.size());
-
-        d->_feat->gradient(_feature_gradient.data(), _work.data());
-        int start = 0;
-        for(int tt = 0; tt < _task_sizes.size(); ++tt)
+        // DocString: nlopt_wrapper_get_reg_optimizer_arr_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
         {
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_residuals[start],
-                [p, tt](double vp, double prop){
-                    return _cauchy_scaling * std::log(1 + std::pow(prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]), 2.0) / _cauchy_scaling);
-                }
-            );
-
-            // Calculate the base of the gradient for each step
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [p, tt](double vp, double prop){
-                    return prop - std::log(vp * p[2*tt] + p[2*tt + 1]);
-                }
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
             );
+        }
 
-            std::transform(
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start +_task_sizes[tt]],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [](double s){
-                    return -2.0 / (1.0 + std::pow(s / _cauchy_scaling, 2.0)) * s;
-                }
+        // DocString: nlopt_wrapper_get_reg_optimizer_arr_arr
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
             );
+        }
 
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [p, tt](double vp, double s){
-                    return s / (p[2 * tt] * vp + p[2 * tt + 1]);
-                }
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_list_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerLogRegression get_log_reg_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
             );
+        }
 
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                &_feature_gradient[2 * tt * _n_samp + start],
-                [](double vp, double s){
-                    return vp * s;
-                }
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_list_arr
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerLogRegression get_log_reg_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
             );
-
-            for(int pp = 0; pp < d->_feat->n_params(); ++pp)
-            {
-                std::transform(
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start + _task_sizes[tt]],
-                    &_feature_gradient[2 * (pp + _task_sizes.size()) * _n_samp + start],
-                    [p, tt](double s){return p[2 * tt] * s;}
-                );
-                std::copy_n(
-                    &_feature_gradient[2 * (pp + _task_sizes.size()) * _n_samp + start],
-                    _task_sizes[tt],
-                    &_feature_gradient[(2 * (pp + _task_sizes.size()) + 1) * _n_samp + start]
-                );
-            }
-            start += _task_sizes[tt];
         }
-        return std::accumulate(_residuals.begin(), _residuals.end(), 0.0);
-    }
-
-    /**
-     * @brief uses nlopt to optimize the parameters of a feature
-     *
-     * @param data data structure that passes the feature and prop to nlopt
-     * @param use_simplex If true use a Nelder-Mead type optimizer (LN_SBPLEX) for local optimization
-     *                    Otherwise use a gradient decent based approach if possible
-     */
-    double optimize_feature_params(feat_data data, bool use_simplex=false);
-
-    /**
-     * @brief Set up the projection operator for the objective function
-     *
-     * @param task_sizes number of samples per task
-     * @param max_dim Maximum dimension of the features
-     * @param n_rung maximum rung of a feature
-     */
-    void setup_data(std::vector<int> task_sizes, int max_dim, int n_rung, int max_param_depth=-1);
-
-    /**
-     * @brief Set up the projection operator for the objective function
-     *
-     * @param calc_type string key for the type of the calculation to run
-     * @param prop pointer to the property
-     * @param sizes number of samples per task
-     * @param n_rung maximum rung of a feature
-     */
-    void set_objective(std::string calc_type, double* prop, const std::vector<int> sizes, int n_rung, int max_param_depth=100);
 
-    #ifdef PY_BINDINGS
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_arr_list
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
          *
-         * @param task_sizes number of samples per task
-         * @param max_dim Maximum dimension of the features
-         * @param n_rung maximum rung of a feature
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
          */
-        inline void setup_data(py::list task_sizes, int max_dim, int n_rung, int max_param_depth=-1){setup_data(python_conv_utils::from_list<int>(task_sizes), max_dim, n_rung, max_param_depth);}
+        inline NLOptimizerLogRegression get_log_reg_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
+            );
+        }
 
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_arr_arr
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
          *
-         * @param task_sizes number of samples per task
-         * @param max_dim Maximum dimension of the features
-         * @param n_rung maximum rung of a feature
+         * @return The correct optimizer
          */
-        inline void setup_data(np::ndarray task_sizes, int max_dim, int n_rung, int max_param_depth=-1){setup_data(python_conv_utils::from_ndarray<int>(task_sizes), max_dim, n_rung, max_param_depth);}
+        inline NLOptimizerLogRegression get_log_reg_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
+            );
+        }
 
+        // DocString: nlopt_wrapper_get_class_optimizer_list_list
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
          *
-         * @param calc_type string key for the type of the calculation to run
-         * @param prop list to the property
-         * @param N number of samples per task
-         * @param n_rung maximum rung of a feature
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         *
+         * @return The correct optimizer
          */
-        inline void set_objective(std::string calc_type, py::list prop, py::list sizes, int n_rung, int max_param_depth=100)
+        inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
         {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
             std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-            return set_objective(calc_type, prop_vec.data(), python_conv_utils::from_list<int>(sizes), n_rung, max_param_depth);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
         }
 
+        // DocString: nlopt_wrapper_get_class_optimizer_list_arr
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
          *
-         * @param calc_type string key for the type of the calculation to run
-         * @param prop list to the property
-         * @param N number of samples per task
-         * @param n_rung maximum rung of a feature
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         *
+         * @return The correct optimizer
          */
-        inline void set_objective(std::string calc_type, np::ndarray prop, py::list sizes, int n_rung, int max_param_depth=100)
+        inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
         {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
             std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-            return set_objective(calc_type, prop_vec.data(), python_conv_utils::from_list<int>(sizes), n_rung, max_param_depth);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
+        }
+
+        // DocString: nlopt_wrapper_get_class_optimizer_arr_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
         }
 
+        // DocString: nlopt_wrapper_get_class_optimizer_arr_arr
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
          *
-         * @param calc_type string key for the type of the calculation to run
-         * @param prop list to the property
-         * @param N number of samples per task
-         * @param n_rung maximum rung of a feature
+         * @return The correct optimizer
          */
-        inline void set_objective(std::string calc_type, np::ndarray prop, np::ndarray sizes, int n_rung, int max_param_depth=100)
+        inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
         {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
             std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-            return set_objective(calc_type, prop_vec.data(), python_conv_utils::from_ndarray<int>(sizes), n_rung, max_param_depth);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
         }
     #endif
+
 }
 
 
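[Editor's note on the header above: each Python-facing get_*_optimizer overload only normalizes its inputs (py::list or np::ndarray to std::vector) and then forwards to the one real constructor. A minimal stand-alone sketch of that funneling idiom; ListLike/ArrayLike and the Optimizer struct are hypothetical stand-ins, not the library's types:

    #include <utility>
    #include <vector>

    // Hypothetical stand-ins for the py::list / np::ndarray argument types.
    struct ListLike  { std::vector<int> data; };
    struct ArrayLike { std::vector<int> data; };

    struct Optimizer
    {
        std::vector<int> task_sizes;
        int n_rung;
        Optimizer(std::vector<int> ts, int nr) : task_sizes(std::move(ts)), n_rung(nr) {}
    };

    // Each overload converts its argument, then delegates to the one constructor.
    Optimizer get_optimizer(const ListLike& ts, int n_rung)  { return Optimizer(ts.data, n_rung); }
    Optimizer get_optimizer(const ArrayLike& ts, int n_rung) { return Optimizer(ts.data, n_rung); }

    int main()
    {
        ListLike tasks{{90}};
        Optimizer opt = get_optimizer(tasks, 1); // the ListLike overload is chosen
        return opt.n_rung == 1 ? 0 : 1;
    }

The duplication across overloads is the price of letting Python callers pass either container type without an explicit copy on their side.]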
diff --git a/src/python/__init__.py b/src/python/__init__.py
index ec156aaf..48924cae 100644
--- a/src/python/__init__.py
+++ b/src/python/__init__.py
@@ -316,7 +316,7 @@ def generate_fs(
         phi_0,
         allowed_ops,
         allowed_param_ops,
-        list(prop),
+        prop,
         task_sizes_train,
         calc_type,
         max_phi,
diff --git a/src/python/bindings_docstring_keyed.cpp b/src/python/bindings_docstring_keyed.cpp
index c92a21ce..a110f252 100644
--- a/src/python/bindings_docstring_keyed.cpp
+++ b/src/python/bindings_docstring_keyed.cpp
@@ -58,18 +58,43 @@ void sisso::register_all()
     def("initialize_values_arr", &node_value_arrs::initialize_values_arr);
     def("initialize_d_matrix_arr", &node_value_arrs::initialize_d_matrix_arr);
 
-    void (*set_objective_list_list)(std::string, py::list, py::list, int, int) = &nlopt_wrapper::set_objective;
-    void (*set_objective_arr_list)(std::string, np::ndarray, py::list, int, int) = &nlopt_wrapper::set_objective;
-    void (*set_objective_arr_arr)(std::string, np::ndarray, np::ndarray, int, int) = &nlopt_wrapper::set_objective;
 
-    def("set_objective", set_objective_list_list);
-    def("set_objective", set_objective_arr_list);
-    def("set_objective", set_objective_arr_arr);
+    #ifdef PARAMETERIZE
+        sisso::feature_creation::nloptimizer::registerNLOptimizer();
+        sisso::feature_creation::nloptimizer::registerNLOptimizerClassification();
+        sisso::feature_creation::nloptimizer::registerNLOptimizerRegression();
+        sisso::feature_creation::nloptimizer::registerNLOptimizerLogRegression();
 
-    void(*setup_data_list)(py::list, int, int, int) = &nlopt_wrapper::setup_data;
-    void(*setup_data_arr)(np::ndarray, int, int, int) = &nlopt_wrapper::setup_data;
-    def("setup_data", setup_data_list);
-    def("setup_data", setup_data_arr);
+        NLOptimizerRegression(*get_reg_optimizer_list_list)(py::list, py::list, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+        NLOptimizerRegression(*get_reg_optimizer_list_arr)(py::list, np::ndarray, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+        NLOptimizerRegression(*get_reg_optimizer_arr_list)(np::ndarray, py::list, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+        NLOptimizerRegression(*get_reg_optimizer_arr_arr)(np::ndarray, np::ndarray, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+
+        NLOptimizerLogRegression(*get_log_reg_optimizer_list_list)(py::list, py::list, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+        NLOptimizerLogRegression(*get_log_reg_optimizer_list_arr)(py::list, np::ndarray, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+        NLOptimizerLogRegression(*get_log_reg_optimizer_arr_list)(np::ndarray, py::list, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+        NLOptimizerLogRegression(*get_log_reg_optimizer_arr_arr)(np::ndarray, np::ndarray, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+
+        NLOptimizerClassification(*get_class_optimizer_list_list)(py::list, py::list, int, int) = &nlopt_wrapper::get_class_optimizer;
+        NLOptimizerClassification(*get_class_optimizer_list_arr)(py::list, np::ndarray, int, int) = &nlopt_wrapper::get_class_optimizer;
+        NLOptimizerClassification(*get_class_optimizer_arr_list)(np::ndarray, py::list, int, int) = &nlopt_wrapper::get_class_optimizer;
+        NLOptimizerClassification(*get_class_optimizer_arr_arr)(np::ndarray, np::ndarray, int, int) = &nlopt_wrapper::get_class_optimizer;
+
+        def("get_reg_optimizer", get_reg_optimizer_list_list, "@DocString_nlopt_wrapper_get_reg_optimizer_list_list");
+        def("get_reg_optimizer", get_reg_optimizer_list_arr, "@DocString_nlopt_wrapper_get_reg_optimizer_list_arr");
+        def("get_reg_optimizer", get_reg_optimizer_arr_list, "@DocString_nlopt_wrapper_get_reg_optimizer_arr_list");
+        def("get_reg_optimizer", get_reg_optimizer_arr_arr, "@DocString_nlopt_wrapper_get_reg_optimizer_arr_arr");
+
+        def("get_log_reg_optimizer", get_log_reg_optimizer_list_list, "@DocString_nlopt_wrapper_get_log_reg_optimizer_list_list");
+        def("get_log_reg_optimizer", get_log_reg_optimizer_list_arr, "@DocString_nlopt_wrapper_get_log_reg_optimizer_list_arr");
+        def("get_log_reg_optimizer", get_log_reg_optimizer_arr_list, "@DocString_nlopt_wrapper_get_log_reg_optimizer_arr_list");
+        def("get_log_reg_optimizer", get_log_reg_optimizer_arr_arr, "@DocString_nlopt_wrapper_get_log_reg_optimizer_arr_arr");
+
+        def("get_class_optimizer", get_class_optimizer_list_list, "@DocString_nlopt_wrapper_get_class_optimizer_list_list");
+        def("get_class_optimizer", get_class_optimizer_list_arr, "@DocString_nlopt_wrapper_get_class_optimizer_list_arr");
+        def("get_class_optimizer", get_class_optimizer_arr_list, "@DocString_nlopt_wrapper_get_class_optimizer_arr_list");
+        def("get_class_optimizer", get_class_optimizer_arr_arr, "@DocString_nlopt_wrapper_get_class_optimizer_arr_arr");
+    #endif
 }
 
 void sisso::feature_creation::registerFeatureSpace()
@@ -126,6 +151,25 @@ void sisso::feature_creation::registerUnit()
 }
 
 #ifdef PARAMETERIZE
+    void sisso::feature_creation::nloptimizer::registerNLOptimizer()
+    {
+        class_<sisso::feature_creation::nloptimizer::NLOptimizerWrap, boost::noncopyable>("NLOptimizer", no_init)
+            .def("optimize_feature_params", &NLOptimizer::optimize_feature_params, "@DocString_nloptimizer_optimize_feature_params@")
+        ;
+    }
+    void sisso::feature_creation::nloptimizer::registerNLOptimizerClassification()
+    {
+        class_<NLOptimizerClassification, bases<NLOptimizer>>("NLOptimizerClassification", no_init);
+    }
+    void sisso::feature_creation::nloptimizer::registerNLOptimizerRegression()
+    {
+        class_<NLOptimizerRegression, bases<NLOptimizer>>("NLOptimizerRegression", no_init);
+    }
+    void sisso::feature_creation::nloptimizer::registerNLOptimizerLogRegression()
+    {
+        class_<NLOptimizerLogRegression, bases<NLOptimizer>>("NLOptimizerLogRegression", no_init);
+    }
+
     void sisso::feature_creation::node::registerNode()
     {
         void (Node::*reindex_1)(int) = &Node::reindex;
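[Editor's note: the registration code above leans on a standard C++ idiom. Assigning an overload set to a typed function-pointer variable forces the compiler to select exactly one overload, which Boost.Python's def() can then bind unambiguously. A self-contained illustration of the idiom in plain C++, with no Boost dependency; the names are illustrative only:

    #include <cassert>
    #include <string>
    #include <vector>

    // Two overloads sharing one name, as with get_class_optimizer.
    std::string describe(const std::vector<int>&)    { return "list"; }
    std::string describe(const std::vector<double>&) { return "array"; }

    int main()
    {
        // The pointer's declared signature selects exactly one overload.
        std::string (*describe_int)(const std::vector<int>&) = &describe;
        std::string (*describe_dbl)(const std::vector<double>&) = &describe;

        assert(describe_int({1, 2}) == "list");
        assert(describe_dbl({1.0}) == "array");
        return 0;
    }
]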
diff --git a/src/python/bindings_docstring_keyed.hpp b/src/python/bindings_docstring_keyed.hpp
index 2eebb78d..8183ab83 100644
--- a/src/python/bindings_docstring_keyed.hpp
+++ b/src/python/bindings_docstring_keyed.hpp
@@ -31,6 +31,22 @@ namespace sisso
         static void registerFeatureSpace();
         static void registerDomain();
         static void registerUnit();
+        namespace nloptimizer
+        {
+            struct NLOptimizerWrap : NLOptimizer, py::wrapper<NLOptimizer>
+            {
+            public:
+                inline std::shared_ptr<ConvexHull1D> convex_hull(){return this->get_override("convex_hull")();}
+                inline double* feature_gradient(int ind){return this->get_override("feature_gradient")(ind);}
+                inline double* residuals(int ind){return this->get_override("residuals")(ind);}
+                inline double cauchy_scaling(){return this->get_override("cauchy_scaling")();}
+            };
+            static void registerNLOptimizer();
+            static void registerNLOptimizerClassification();
+            static void registerNLOptimizerRegression();
+            static void registerNLOptimizerLogRegression();
+
+        }
         namespace node
         {
             /**
@@ -95,7 +111,7 @@ namespace sisso
                 inline std::string get_latex_expr(){return this->get_override("latex_expr")();}
                 inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot){this->get_override("update_add_sub_leaves")();}
                 inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot){this->get_override("update_div_mult_leaves")();}
-                inline void get_parameters(std::vector<double>& prop){this->get_override("get_parameters")();}
+                inline void get_parameters(std::shared_ptr<NLOptimizer> optimizer){this->get_override("get_parameters")();}
                 inline void set_parameters(std::vector<double>, bool check_sz=true){this->get_override("set_parameters")();}
                 inline std::vector<double> parameters(){return this->get_override("parameters")();}
                 inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1){this->get_override("set_bounds")();}
@@ -144,8 +160,6 @@ namespace sisso
                 template<int N>
                 static void registerOperatorNode()
                 {
-                    void (OperatorNode<N>::*get_parameters_list)(py::list) = &OperatorNode<N>::get_parameters;
-                    void (OperatorNode<N>::*get_parameters_arr)(np::ndarray) = &OperatorNode<N>::get_parameters;
                     void (OperatorNode<N>::*set_params_list)(py::list) = &OperatorNode<N>::set_parameters;
                     void (OperatorNode<N>::*set_params_arr)(np::ndarray) = &OperatorNode<N>::set_parameters;
                     py::class_<OperatorNodeWrap<N>, py::bases<Node>, boost::noncopyable>("OperatorNode")
@@ -153,8 +167,7 @@ namespace sisso
                         .def("is_const", &OperatorNode<N>::is_const, "@DocString_op_node_is_const@")
                         .def("rung", py::pure_virtual(&OperatorNode<N>::rung), "@DocString_op_node_rung@")
                         .def("unit", py::pure_virtual(&OperatorNode<N>::unit), "@DocString_op_node_unit@")
-                        .def("get_parameters", get_parameters_arr, "@DocString_op_node_param_arr@")
-                        .def("get_parameters", get_parameters_list, "@DocString_op_node_param_list@")
+                        .def("get_parameters", py::pure_virtual(&OperatorNode<N>::get_parameters), "@DocString_op_node_get_params@")
                         .def("set_parameters", set_params_arr, "@DocString_op_node_set_param_arr@")
                         .def("set_parameters", set_params_list, "@DocString_op_node_set_param_list@")
                         .add_property("n_feats", &OperatorNode<N>::n_feats, "@DocString_op_node_n_feats@")
diff --git a/src/python/feature_creation/FeatureSpace.cpp b/src/python/feature_creation/FeatureSpace.cpp
index d9172f91..46f1bcec 100644
--- a/src/python/feature_creation/FeatureSpace.cpp
+++ b/src/python/feature_creation/FeatureSpace.cpp
@@ -26,6 +26,7 @@ FeatureSpace::FeatureSpace(
     _start_gen(1, 0),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
+    _project_type(project_type),
     _mpi_comm(mpi_setup::comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(min_abs_feat_val),
@@ -38,7 +39,7 @@ FeatureSpace::FeatureSpace(
     _n_samp(_phi[0]->n_samp()),
     _max_param_depth(max_param_depth)
 {
-    initialize_fs(project_type);
+    initialize_fs();
 }
 
 FeatureSpace::FeatureSpace(
@@ -67,6 +68,7 @@ FeatureSpace::FeatureSpace(
     _start_gen(1, 0),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
+    _project_type(project_type),
     _mpi_comm(mpi_setup::comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(min_abs_feat_val),
@@ -79,7 +81,7 @@ FeatureSpace::FeatureSpace(
     _n_samp(_phi[0]->n_samp()),
     _max_param_depth(max_param_depth)
 {
-    initialize_fs(project_type);
+    initialize_fs();
 }
 
 FeatureSpace::FeatureSpace(
@@ -97,6 +99,7 @@ FeatureSpace::FeatureSpace(
     _task_sizes(python_conv_utils::from_list<int>(task_sizes)),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
+    _project_type(project_type),
     _mpi_comm(mpi_setup::comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(1e-50),
@@ -108,17 +111,17 @@ FeatureSpace::FeatureSpace(
     _n_samp(_phi_0[0]->n_samp()),
     _max_param_depth(-1)
 {
-    if(project_type.compare("regression") == 0)
+    if(_project_type.compare("regression") == 0)
     {
         _project = project_funcs::project_r2;
         _project_no_omp = project_funcs::project_r2_no_omp;
     }
-    else if(project_type.compare("classification") == 0)
+    else if(_project_type.compare("classification") == 0)
     {
         _project = project_funcs::project_classify;
         _project_no_omp = project_funcs::project_classify_no_omp;
     }
-    else if(project_type.compare("log_regression") == 0)
+    else if(_project_type.compare("log_regression") == 0)
     {
         if(_task_sizes.size() > 1)
             throw std::logic_error("Log Regression can not be done using multiple tasks.");
@@ -191,7 +194,7 @@ FeatureSpace::FeatureSpace(
 
     for(int rr = 1; rr < _max_phi; ++rr)
     {
-        nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, rr);
+        nlopt_wrapper::MAX_PARAM_DEPTH = rr;
         bool is_correct = true;
         for(auto& feat : _phi)
         {
@@ -322,7 +325,7 @@ FeatureSpace::FeatureSpace(
 
     for(int rr = 1; rr < _max_phi; ++rr)
     {
-        nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, rr);
+        nlopt_wrapper::MAX_PARAM_DEPTH = rr;
         bool is_correct = true;
         for(auto& feat : _phi)
         {
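[Editor's note: the per-rung set_objective calls are gone; the FeatureSpace constructors now simply raise nlopt_wrapper::MAX_PARAM_DEPTH as they re-parameterize rung by rung. A minimal sketch of that namespace-scope setting; the variable name mirrors the patch, the loop body is illustrative:

    #include <iostream>

    namespace nlopt_wrapper
    {
        // Mutable namespace-scope setting, as introduced by this patch.
        int MAX_PARAM_DEPTH = -1;
    }

    int main()
    {
        int max_phi = 3;
        for(int rr = 1; rr < max_phi; ++rr)
        {
            nlopt_wrapper::MAX_PARAM_DEPTH = rr;
            // ...re-parameterize the features of rung rr here...
            std::cout << "depth limit now " << nlopt_wrapper::MAX_PARAM_DEPTH << "\n";
        }
        return 0;
    }
]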
diff --git a/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc b/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc
index 1b163477..08667d36 100644
--- a/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -48,7 +50,7 @@ namespace
 
             allowed_op_funcs::abs_diff(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -61,20 +63,23 @@ namespace
 
         double _a;
         double _alpha;
+
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(AbsDiffParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AbsDiffParamNode created with an absolute value above the upper bound)";
 
-        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AbsDiffParamNode created with an absolute value below the lower bound)";
 
-        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
+
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
 
@@ -84,7 +89,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (AbsDiffParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -92,7 +97,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AbsDiffParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -100,7 +105,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AbsDiffParamNode created with only one primary feature present)";
         }
         catch(const InvalidFeatureException& e)
@@ -108,7 +113,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _abs_diff_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -120,7 +125,7 @@ namespace
     TEST_F(AbsDiffParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_abs_diff_test->rung(), 1);
 
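[Editor's note: every test fixture now builds one optimizer in SetUp() and threads the shared_ptr through the node constructors, replacing the old global set_objective state. A stand-alone sketch of such a string-keyed factory; the Optimizer hierarchy here is a placeholder, and only the shape of nlopt_wrapper::get_optimizer's use in the tests is taken from the patch:

    #include <memory>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Optimizer { virtual ~Optimizer() = default; };
    struct RegressionOpt : Optimizer {};
    struct ClassificationOpt : Optimizer {};

    // String-keyed factory, analogous in shape to nlopt_wrapper::get_optimizer.
    std::shared_ptr<Optimizer> get_optimizer(const std::string& type,
                                             const std::vector<int>& /*task_sizes*/,
                                             const std::vector<double>& /*prop*/,
                                             int /*n_rung*/)
    {
        if(type == "regression")
            return std::make_shared<RegressionOpt>();
        if(type == "classification")
            return std::make_shared<ClassificationOpt>();
        throw std::logic_error("Unknown calculation type: " + type);
    }

    int main()
    {
        std::vector<double> prop(90, 0.0);
        std::shared_ptr<Optimizer> opt = get_optimizer("regression", {90}, prop, 1);
        return opt ? 0 : 1; // the shared_ptr is handed to each node, as in the fixtures
    }
]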
diff --git a/tests/googletest/feature_creation/parameterization/test_abs_node.cc b/tests/googletest/feature_creation/parameterization/test_abs_node.cc
index 6e493cbf..8e472db1 100644
--- a/tests/googletest/feature_creation/parameterization/test_abs_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_abs_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 1);
 
             _task_sizes_train = {900};
@@ -38,7 +40,8 @@ namespace
 
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::abs(900, _phi[0]->value_ptr(), _alpha, _a, _prop.data());
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -50,19 +53,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(AbsParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _prop);
+        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 1) << " (AbsParamNode created with an absolute value above the upper bound)";
 
-        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _prop);
+        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 1) << " (AbsParamNode created with an absolute value below the lower bound)";
 
-        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _prop);
+        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -73,7 +77,7 @@ namespace
 
         try
         {
-            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _prop);
+            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (AbsParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -81,7 +85,7 @@ namespace
 
         try
         {
-            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e40, 1e50, _prop);
+            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e40, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AbsParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -89,7 +93,7 @@ namespace
 
         try
         {
-            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _abs_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -101,7 +105,7 @@ namespace
     TEST_F(AbsParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+        _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_abs_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_add_node.cc b/tests/googletest/feature_creation/parameterization/test_add_node.cc
index bb5560ff..3b8f5b26 100644
--- a/tests/googletest/feature_creation/parameterization/test_add_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_add_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::add(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(AddParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AddParamNode created with an absolute value above the upper bound)";
 
-        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AddParamNode created with an absolute value below the lower bound)";
 
-        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (AddParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _prop);
+            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AddParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _add_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(AddParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_add_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_cb_node.cc b/tests/googletest/feature_creation/parameterization/test_cb_node.cc
index 8f74c580..12e7479f 100644
--- a/tests/googletest/feature_creation/parameterization/test_cb_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_cb_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::cb(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(CbParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbParamNode created with an absolute value above the upper bound)";
 
-        generateCbParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateCbParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbParamNode created with an absolute value below the lower bound)";
 
-        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (CbParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CbParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(CbParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc b/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc
index b7f53456..4eddb999 100644
--- a/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::cbrt(900, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(CbrtParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbrtParamNode created with an absolute value above the upper bound)";
 
-        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbrtParamNode created with an absolute value below the lower bound)";
 
-        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (CbrtParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CbrtParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _cbrt_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(CbrtParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_cbrt_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_cos_node.cc b/tests/googletest/feature_creation/parameterization/test_cos_node.cc
index 94042cfc..af43f062 100644
--- a/tests/googletest/feature_creation/parameterization/test_cos_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_cos_node.cc
@@ -14,6 +14,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 3);
 
             _task_sizes_train = {900};
@@ -52,7 +54,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::cos(900, _phi[0]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -66,6 +68,7 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(CosParamNodeTest, GeneratorTest)
@@ -73,19 +76,19 @@ namespace
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
 
-        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _prop);
+        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created with an absolute value above the upper bound)";
 
-        generateCosParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created with an absolute value below the lower bound)";
 
-        generateCosParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created from CosNode)";
 
-        generateCosParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created from SinNode)";
 
-        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-5);
     }
@@ -96,7 +99,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -104,7 +107,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e49, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -112,7 +115,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created from CosNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -120,7 +123,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created from SinNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -128,7 +131,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _cos_test->value_ptr(), 90), 1e-5);
         }
         catch(const InvalidFeatureException& e)
@@ -140,7 +143,7 @@ namespace
     TEST_F(CosParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _cos_test = std::make_shared<CosParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _cos_test = std::make_shared<CosParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_cos_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_div_node.cc b/tests/googletest/feature_creation/parameterization/test_div_node.cc
index 042312b6..8ea61b90 100644
--- a/tests/googletest/feature_creation/parameterization/test_div_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_div_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::div(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(DivParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (DivParamNode created with an absolute value above the upper bound)";
 
-        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (DivParamNode created with an absolute value below the lower bound)";
 
-        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (DivParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (DivParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _div_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(DivParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_div_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_exp_node.cc b/tests/googletest/feature_creation/parameterization/test_exp_node.cc
index dcbcabb7..c9df2ce7 100644
--- a/tests/googletest/feature_creation/parameterization/test_exp_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_exp_node.cc
@@ -15,6 +15,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -55,7 +57,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::exp(900, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -69,28 +71,29 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(ExpParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
-        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created with an absolute value above the upper bound)";
 
-        generateExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created with an absolute value below the lower bound)";
 
-        generateExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created from ExpNode)";
 
-        generateExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created from LogNode)";
 
-        generateExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created from NegExpNode)";
 
-        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -101,7 +104,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -109,7 +112,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -117,7 +120,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created from ExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -125,7 +128,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created from LogNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -133,7 +136,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created from NegExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -141,7 +144,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -153,7 +156,7 @@ namespace
     TEST_F(ExpParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_inv_node.cc b/tests/googletest/feature_creation/parameterization/test_inv_node.cc
index 932b39d2..496303ae 100644
--- a/tests/googletest/feature_creation/parameterization/test_inv_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_inv_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::inv(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(InvParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (InvParamNode created with an absolute value above the upper bound)";
 
-        generateInvParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateInvParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (InvParamNode created with an absolute value below the lower bound)";
 
-        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (InvParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (InvParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _inv_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(InvParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_inv_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_log_node.cc b/tests/googletest/feature_creation/parameterization/test_log_node.cc
index 294771b4..9196ec3b 100644
--- a/tests/googletest/feature_creation/parameterization/test_log_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_log_node.cc
@@ -55,7 +55,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::log(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -69,6 +69,7 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(LogParamNodeTest, GeneratorTest)
@@ -76,22 +77,22 @@ namespace
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
 
-        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created with an absolute value above the upper bound)";
 
-        generateLogParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created with an absolute value below the lower bound)";
 
-        generateLogParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created from ExpNode)";
 
-        generateLogParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created from LogNode)";
 
-        generateLogParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created from NegExpNode)";
 
-        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -102,7 +103,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -110,7 +111,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -118,7 +119,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created from ExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -126,7 +127,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created from LogNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -134,7 +135,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created from NegExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -142,7 +143,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -154,7 +155,7 @@ namespace
     TEST_F(LogParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_mult_node.cc b/tests/googletest/feature_creation/parameterization/test_mult_node.cc
index e2c65a89..7343711b 100644
--- a/tests/googletest/feature_creation/parameterization/test_mult_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_mult_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::mult(900, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(MultParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (MultParamNode created with an absolute value above the upper bound)";
 
-        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (MultParamNode created with an absolute value below the lower bound)";
 
-        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (MultParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (MultParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _mult_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(MultParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_mult_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc b/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc
index 3dd9c155..cb9554e1 100644
--- a/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc
@@ -15,6 +15,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -55,7 +57,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::neg_exp(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -69,29 +71,30 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(NegExpParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
-        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created with an absolute value above the upper bound)";
 
-        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created with an absolute value below the lower bound)";
 
-        generateNegExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created from ExpNode)";
 
-        generateNegExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created from LogNode)";
 
-        generateNegExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created from NegExpNode)";
 
-        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
 
@@ -101,7 +105,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -109,7 +113,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -117,7 +121,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created from ExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -125,7 +129,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created from LogNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -133,7 +137,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created from NegExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -141,7 +145,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -153,7 +157,7 @@ namespace
     TEST_F(NegExpParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sin_node.cc b/tests/googletest/feature_creation/parameterization/test_sin_node.cc
index 8801de47..75857a13 100644
--- a/tests/googletest/feature_creation/parameterization/test_sin_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sin_node.cc
@@ -14,6 +14,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -52,7 +54,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::sin(900, _phi[0]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -66,6 +68,7 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SinParamNodeTest, GeneratorTest)
@@ -73,19 +76,19 @@ namespace
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
 
-        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _prop);
+        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created with an absolute value above the upper bound)";
 
-        generateSinParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created with an absolute value below the lower bound)";
 
-        generateSinParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created from CosNode)";
 
-        generateSinParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created from SinNode)";
 
-        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-5);
     }
@@ -96,7 +99,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -104,7 +107,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e49, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -112,7 +115,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created from CosNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -120,7 +123,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created from SinNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -128,7 +131,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _sin_test->value_ptr(), 900), 1e-5);
         }
         catch(const InvalidFeatureException& e)
@@ -140,7 +143,7 @@ namespace
     TEST_F(SinParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+        _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_sin_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc b/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc
index cbe2a311..27ae7390 100644
--- a/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sixth_pow(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SixPowParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SixPowParamNode created with an absolute value above the upper bound)";
 
-        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SixPowParamNode created with an absolute value below the lower bound)";
 
-        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SixPowParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SixPowParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(SixPowParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sq_node.cc b/tests/googletest/feature_creation/parameterization/test_sq_node.cc
index 943184d7..b2a0a137 100644
--- a/tests/googletest/feature_creation/parameterization/test_sq_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sq_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sq(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SqParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqParamNode created with an absolute value above the upper bound)";
 
-        generateSqParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSqParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqParamNode created with an absolute value below the lower bound)";
 
-        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SqParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SqParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(SqParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc b/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc
index 211f977f..b715e738 100644
--- a/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sqrt(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SqrtParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqrtParamNode created with an absolute value above the upper bound)";
 
-        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqrtParamNode created with an absolute value below the lower bound)";
 
-        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SqrtParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SqrtParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _sqrt_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(SqrtParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_sqrt_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sub_node.cc b/tests/googletest/feature_creation/parameterization/test_sub_node.cc
index 0452e4b7..fccb94da 100644
--- a/tests/googletest/feature_creation/parameterization/test_sub_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sub_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sub(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SubParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SubParamNode created with an absolute value above the upper bound)";
 
-        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SubParamNode created with an absolute value below the lower bound)";
 
-        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +87,7 @@ namespace
 
         try
         {
-            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SubParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +95,7 @@ namespace
 
         try
         {
-            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _prop);
+            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SubParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +103,7 @@ namespace
 
         try
         {
-            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _sub_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +115,7 @@ namespace
     TEST_F(SubParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_sub_test->rung(), 1);
 
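All of the googletest fixtures above make the same three changes: set nlopt_wrapper::MAX_PARAM_DEPTH in SetUp(), replace the removed global nlopt_wrapper::set_objective(...) call with a std::shared_ptr<NLOptimizer> built by nlopt_wrapper::get_optimizer(...), and hand that _optimizer (instead of the raw _prop vector) to the *ParamNode constructors and generate*ParamNode helpers. The pytest hunks below mirror the same migration through the Python bindings. As a consolidated sketch of the new binding-level flow, assembled only from calls that appear in the hunks below (the trailing 0.5 passed to get_reg_optimizer is copied verbatim from the tests and assumed to be an optimizer tuning setting):

import numpy as np
from cpp_sisso import (
    FeatureNode,
    LogParamNode,
    Unit,
    initialize_values_arr,
    get_reg_optimizer,
)

# 90 training samples, 10 test samples, 1 primary feature.
initialize_values_arr(90, 10, 1)

data_1 = np.linspace(0.5, 20, 90)
test_data_1 = np.linspace(0.52145, 19.99, 10)
feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
prop = -2.014 * np.log(1.15 * data_1 + 0.1387)

# Old API (removed by this patch):
#     set_objective("regression", prop, [90], 1, 1)
#     feat_node.get_parameters(prop)
# New API: build an explicit optimizer object and thread it through.
optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)

feat_node = LogParamNode(feat_1, 2, 1e-50, 1e50)
feat_node.get_parameters(optimizer)
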
diff --git a/tests/pytest/test_descriptor_identifier/test_regressor.py b/tests/pytest/test_descriptor_identifier/test_regressor.py
index 72faf818..5b99f7ae 100644
--- a/tests/pytest/test_descriptor_identifier/test_regressor.py
+++ b/tests/pytest/test_descriptor_identifier/test_regressor.py
@@ -36,7 +36,7 @@ def test_sisso_regressor():
 
     op_set = ["add", "sub", "mult", "sq", "cb", "sqrt", "cbrt"]
 
-    feat_space = generate_fs(phi_0, prop, [90], op_set, [], "regression", 2, 10)
+    feat_space = generate_fs(phi_0, prop, [95], op_set, [], "regression", 2, 10)
 
     sisso = SISSORegressor(
         feat_space,
diff --git a/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py b/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py
index cf9bf867..d4d49c5a 100644
--- a/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py
+++ b/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py
@@ -9,7 +9,6 @@ from cpp_sisso import (
 
 
 def test_feature_space():
-    print("in")
     initialize_values_arr(90, 10, 10)
     phi_0 = [
         FeatureNode(
@@ -26,15 +25,11 @@ def test_feature_space():
 
     op_set = ["add", "sub", "mult", "sq", "cb", "sqrt", "cbrt"]
 
-    print("feat spac")
     feat_space = generate_fs(phi_0, prop, [90], op_set, [], "regression", 2, 10)
-    print("sis")
     feat_space.sis(prop)
 
-    print("rm")
     shutil.rmtree("feature_space/")
 
-    print("assert")
     assert feat_space.phi_selected[0].postfix_expr == "1|0|add|sq"
 
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py b/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py
index 0c6fb9a5..9da86fd8 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     SqNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 import pandas as pd
 import numpy as np
@@ -21,17 +21,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_lorentzian():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.linspace(-20.023658, 20.23658, 900)
     test_data_1 = np.linspace(-19.98549, 19.08, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = 21.4 / ((data_1 - 0.25) ** 2.0 + 7.1) - 1.478
-    set_objective("regression", prop, [900], 2, 2)
+    optimizer = get_reg_optimizer([900], prop, 2, 2, 0.5)
 
     feat_node = InvParamNode(SqNode(feat_1, 2, 1e-50, 1e50), 3, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
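Note on check_feat_parmeters (spelled as in these test files): the helper is defined above the hunks shown, so its body is not part of this patch. A hypothetical stand-in that captures what the assertion verifies, assuming the bound node exposes its training values as feat.value, would be:

import numpy as np

def check_feat_parmeters(feat, prop):
    # Hypothetical reconstruction: after fitting, the parameterized
    # feature should track the property almost exactly, mirroring the
    # r2-based tolerances used in the googletest versions above.
    r = np.corrcoef(feat.value, prop)[0, 1]
    return 1.0 - r ** 2 < 1e-4
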
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py
index 5a66f2f8..0c7dc69d 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     AbsParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,18 +20,18 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_abs_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.linspace(-20, 20, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -2.3 * np.abs(1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = AbsParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
-
+    feat_node.get_parameters(optimizer)
+
     assert check_feat_parmeters(feat_node, prop)
 
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py
index 52b3474b..a46b9033 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     AbsDiffParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,7 +20,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_abs_diff_node():
-    initialize_values_arr(900, 100, 4)
+    initialize_values_arr(900, 100, 2)
 
     data_1 = np.linspace(-20, 20, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 100)
@@ -31,10 +31,10 @@ def test_param_abs_diff_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = -2.3 * np.abs(data_1 - (1.5 * data_2 + 0.8751)) - 1.2
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = AbsDiffParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py
index 12cc528e..b9f02ae7 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     AddParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -19,7 +19,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_add_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 2)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
@@ -30,10 +30,10 @@ def test_param_add_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = -2.3 * (data_1 + 1.5 * data_2) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = AddParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py
index ed48ef11..ff823052 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     CbParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_cb_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.power(1.55 * data_1 + 0.8751, 3.0) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = CbParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py
index 0ce5249c..3e19cd2f 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     CbrtParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_cbrt_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.cbrt(1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = CbrtParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py
index 224a64d9..7a8762e6 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     CosParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -27,10 +27,10 @@ def test_param_cos_node():
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -1.1 * np.cos(1.25 * data_1 + 2.13) + 0.01578
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = CosParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py
index 0a53c829..b07f0d6a 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     DivParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,7 +20,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_div_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 2)
 
     data_1 = np.random.uniform(-2.50, 2.50, 900)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
@@ -31,10 +31,10 @@ def test_param_div_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = 4.124 * data_1 / ((data_2 + 1.8751)) - 0.12
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = DivParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py
index 8a6123f7..782ec320 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     ExpParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_exp_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.random.uniform(-2.50, 2.50, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.exp(1.32 * data_1 + 0.8751) - 0.12
-    set_objective("regression", prop, [900], 10, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = ExpParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py
index bc5a581b..eae398a4 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     InvParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_inv_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(1.0, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = 1.0 / (1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = InvParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py
index 47a4cb75..46dd8327 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     LogParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_log_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -2.014 * np.log(1.15 * data_1 + 0.1387)
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = LogParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py
index 82286f75..f4044c5b 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     NegExpParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,18 +20,18 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_neg_exp_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.random.uniform(-2.5, 2.5, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
-    prop = np.exp(-1.55 * data_1 + 0.8751) - 0.12
-    set_objective("regression", prop, [900], 1, 1)
+    prop = np.exp(-1.55 * data_1 - 0.8751) - 0.12
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = NegExpParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
-
+    feat_node.get_parameters(optimizer)
+
     assert check_feat_parmeters(feat_node, prop)
 
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py
index 4ea5bb80..0cf5e92d 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SinParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -27,10 +27,10 @@ def test_param_sin_node():
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -1.1 * np.sin(1.25 * data_1 + 2.13) + 0.01578
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = SinParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py
index 69e5bb38..ad9667c5 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SixPowParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -27,10 +27,10 @@ def test_param_six_pow_node():
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = 1.55 * np.power(data_1 + 0.21, 6.0) - 0.12
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = SixPowParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py
index 77084b49..eafecb19 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SqParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_sq_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.power(1.55 * data_1 + 0.8751, 2.0) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = SqParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py
index 9eb621c4..9602d4b7 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SqrtParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_sqrt_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.sqrt(1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = SqrtParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py
index 93715635..02a26979 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SubParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -19,7 +19,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_sub_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 2)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
@@ -30,10 +30,10 @@ def test_param_sub_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = -2.3 * (data_1 - 1.5 * data_2) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = SubParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
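The model-evaluation tests that follow each touch only their setup line: the old code configured the global objective against a zero property vector, and the new code builds an optimizer the same way. The shared pattern, copied from the hunks below:

import numpy as np
from cpp_sisso import initialize_values_arr, get_reg_optimizer

# Evaluation-only tests: the optimizer is built against a zero property
# vector because nothing is fit here; whether later node construction
# requires it is not visible in these hunks. The third argument to
# initialize_values_arr varies with each test's primary feature count.
initialize_values_arr(90, 10, 2)
optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
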
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py
index 5796af4e..7ab782df 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_abs_diff_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py
index 8256cb2d..4226c71e 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_abs_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py
index dfb04a64..0c79638e 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_add_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py
index 22c9da78..8ddd0bc1 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_bin_bin_model_eval():
     initialize_values_arr(90, 10, 3)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py
index fc84e148..240d3478 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_bin_un_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py
index 2be9338e..de176c93 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_cb_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py
index 827e7e37..55d00779 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_cbrt_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py
index 97b0acef..54d6de3e 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_cos_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py
index 44380b63..11a78d9e 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_div_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py
index 6a46701d..e2ee2581 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_exp_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e1 - 1e1
     test_data_1 = np.random.random(10) * 2e1 - 1e1
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py
index 2061d475..d770359b 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_inv_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py
index 8e1bea46..e2b5cf92 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_log_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py
index f8f28abb..9223130e 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_mult_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py
index 9ba26d6a..c2153735 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_neg_exp_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e1 - 1e1
     test_data_1 = np.random.random(10) * 2e1 - 1e1
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py
index 94a81327..bd3be3b6 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sin_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py
index fe66ad0c..c169fc8c 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_six_pow_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py
index 883a70fc..27460202 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sq_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py
index 8769bfda..65de6dc0 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sqrt_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py
index a427b4e4..b6d78952 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sub_parm_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py
index c2594713..f3e2f003 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_un_bin_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py
index a0024a1e..626ce2fd 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_un_un_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
-- 
GitLab
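
The hunks above all apply the same migration: the global
set_objective("regression", ...) call is replaced by a per-test optimizer
object returned from get_reg_optimizer, so the objective is no longer hidden
global state. A minimal sketch of the new test setup follows, assuming the
argument order suggested by the hunks — task sizes, property vector, and the
trailing (2, 2, 0.5) arguments used throughout the updated tests; the
parameter names in the comments are inferred, not confirmed by the patch.
It is appended after the signature trailer so the diff above still applies
cleanly.

    import numpy as np
    from cpp_sisso import initialize_values_arr, get_reg_optimizer

    # Storage for 90 training samples, 10 test samples, and 1 primary feature,
    # matching the unary tests above.
    initialize_values_arr(90, 10, 1)

    # One task of 90 samples and a zeroed property vector; the remaining
    # arguments (2, 2, 0.5) mirror the old set_objective(..., 2, 2) call plus
    # one new trailing scale factor introduced by this patch.
    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)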