diff --git a/src/feature_creation/feature_space/FeatureSpace.cpp b/src/feature_creation/feature_space/FeatureSpace.cpp
index ecf32ff70f8c2db3b5183ad4d3cb6bfccbb63ddc..e943d3d9475320d40dc0f431bf0b2bf980410027 100644
--- a/src/feature_creation/feature_space/FeatureSpace.cpp
+++ b/src/feature_creation/feature_space/FeatureSpace.cpp
@@ -65,6 +65,7 @@ FeatureSpace::FeatureSpace(
     _start_gen(1, 0),
+    _project_type(project_type),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
     _mpi_comm(mpi_comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(min_abs_feat_val),
@@ -77,10 +78,10 @@ FeatureSpace::FeatureSpace(
     _n_rung_generate(n_rung_generate),
     _max_param_depth(max_param_depth)
 {
-    initialize_fs(project_type);
+    initialize_fs();
 }
 
-void FeatureSpace::initialize_fs(std::string project_type)
+void FeatureSpace::initialize_fs()
 {
     #ifndef PARAMETERIZE
         if(_allowed_param_ops.size() != 0)
@@ -90,7 +91,7 @@ void FeatureSpace::initialize_fs(std::string project_type)
             _max_param_depth = _max_phi;
         if((_max_param_depth < 0) || (_max_param_depth > _max_phi))
             throw std::logic_error("Invalid parameter depth.");
-        nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, _max_param_depth);
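+        // The objective is no longer configured globally here; each thread now
+        // builds its own NLOptimizer when features are generated, so only the
+        // maximum parameterization depth needs to be recorded.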
+        nlopt_wrapper::MAX_PARAM_DEPTH = _max_param_depth;
     #endif
 
     if(_n_rung_store == -1)
@@ -112,17 +113,17 @@ void FeatureSpace::initialize_fs(std::string project_type)
         sum_file_stream.close();
     }
 
-    if(project_type.compare("regression") == 0)
+    if(_project_type.compare("regression") == 0)
     {
         _project = project_funcs::project_r2;
         _project_no_omp = project_funcs::project_r2_no_omp;
     }
-    else if(project_type.compare("classification") == 0)
+    else if(_project_type.compare("classification") == 0)
     {
         _project = project_funcs::project_classify;
         _project_no_omp = project_funcs::project_classify_no_omp;
     }
-    else if(project_type.compare("log_regression") == 0)
+    else if(_project_type.compare("log_regression") == 0)
     {
         if(_task_sizes.size() > 1)
             throw std::logic_error("Log Regression can not be done using multiple tasks.");
@@ -183,47 +184,73 @@ void FeatureSpace::initialize_fs(std::string project_type)
     _scores.resize(_phi.size());
 }
 
-void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound, double u_bound)
-{
-    int phi_ind = feat - _phi.begin();
-    feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
-
-    for(auto& op : _un_operators)
+#ifdef PARAMETERIZE
+    void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, std::shared_ptr<NLOptimizer> optimizer, double l_bound, double u_bound)
     {
-        op(feat_set, *feat, feat_ind, l_bound, u_bound);
-    }
+        int phi_ind = feat - _phi.begin();
+        feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
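+        // Note: this reserve only accounts for the non-parameterized operators;
+        // the parameterized operators below may still grow feat_set further.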
 
-    for(auto& op : _com_bin_operators)
-    {
-        for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
-            op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
-    }
+        for(auto& op : _un_operators)
+        {
+            op(feat_set, *feat, feat_ind, l_bound, u_bound);
+        }
 
-    for(auto& op : _bin_operators)
-    {
-        for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+        for(auto& op : _com_bin_operators)
         {
-            op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
-            op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+        }
+
+        for(auto& op : _bin_operators)
+        {
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+            {
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
+            }
         }
-    }
-    #ifdef PARAMETERIZE
         for(auto& op : _un_param_operators)
-            op(feat_set, *feat, feat_ind, l_bound, u_bound, _prop);
+            op(feat_set, *feat, feat_ind, l_bound, u_bound, optimizer);
 
         for(auto& op : _com_bin_param_operators)
             for(auto feat_2 = _phi.begin(); feat_2 != feat; ++feat_2)
-                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, _prop);
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, optimizer);
         for(auto& op : _bin_param_operators)
         {
             for(auto feat_2 = _phi.begin(); feat_2 != feat; ++feat_2)
             {
-                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, _prop);
-                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound, _prop);
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, optimizer);
+                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound, optimizer);
             }
         }
-    #endif
-}
+    }
+#else
+    void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound, double u_bound)
+    {
+        int phi_ind = feat - _phi.begin();
+        feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
+
+        for(auto& op : _un_operators)
+        {
+            op(feat_set, *feat, feat_ind, l_bound, u_bound);
+        }
+
+        for(auto& op : _com_bin_operators)
+        {
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+        }
+
+        for(auto& op : _bin_operators)
+        {
+            for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
+            {
+                op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
+                op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
+            }
+        }
+    }
+#endif
 
 void FeatureSpace::generate_feature_space()
 {
@@ -245,16 +272,31 @@ void FeatureSpace::generate_feature_space()
         int feat_ind = _phi.size();
         node_value_arrs::clear_temp_reg();
         double start = omp_get_wtime();
-        #pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
-        {
-            std::vector<node_ptr> next_phi_private;
-            #pragma omp for schedule(dynamic)
-            for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
-                generate_new_feats(feat_1, next_phi_private, feat_ind, l_bound, u_bound);
+        #ifdef PARAMETERIZE
+            #pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
+            {
+                std::vector<node_ptr> next_phi_private;
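+                // One optimizer per OpenMP thread: NLopt state is not shared, so
+                // the parameterized-feature fits can run concurrently.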
+                std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
 
-            #pragma omp critical
-            next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
-        }
+                #pragma omp for schedule(dynamic)
+                for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
+                    generate_new_feats(feat_1, next_phi_private, feat_ind, optimizer, l_bound, u_bound);
+
+                #pragma omp critical
+                next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
+            }
+        #else
+            #pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
+            {
+                std::vector<node_ptr> next_phi_private;
+                #pragma omp for schedule(dynamic)
+                for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
+                    generate_new_feats(feat_1, next_phi_private, feat_ind, l_bound, u_bound);
+
+                #pragma omp critical
+                next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
+            }
+        #endif
         _start_gen.push_back(_phi.size());
         node_value_arrs::clear_temp_reg();
         if((nn < _max_phi) || (nn <= _n_rung_store) || (_mpi_comm->size() == 1))
@@ -523,6 +565,10 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
         std::vector<node_ptr> phi_sel_private(phi_sel);
         std::vector<double> scores_sel_private(scores_sel);
 
+        #ifdef PARAMETERIZE
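+        // As in generate_feature_space, give each thread its own optimizer for
+        // the parameterized feature generation below.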
+        std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
+        #endif
+
         int feat_ind = _phi.size();
 
         #pragma omp for schedule(dynamic)
@@ -533,7 +579,11 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
 
             bool is_sel = (*feat)->selected();
             (*feat)->set_selected(false);
-            generate_new_feats(feat, generated_phi, feat_ind, _l_bound, _u_bound);
+            #ifdef PARAMETERIZE
+                generate_new_feats(feat, generated_phi, feat_ind, optimizer, _l_bound, _u_bound);
+            #else
+                generate_new_feats(feat, generated_phi, feat_ind, _l_bound, _u_bound);
+            #endif
             (*feat)->set_selected(is_sel);
 
             if(generated_phi.size() == 0)
diff --git a/src/feature_creation/feature_space/FeatureSpace.hpp b/src/feature_creation/feature_space/FeatureSpace.hpp
index d043ee84828ddb21c0d9be428ede8d2d04481679..aeccb2ac1fc6b9765d4865814581362f785d7baa 100644
--- a/src/feature_creation/feature_space/FeatureSpace.hpp
+++ b/src/feature_creation/feature_space/FeatureSpace.hpp
@@ -61,6 +61,7 @@ class FeatureSpace
 
     const std::vector<int> _task_sizes; //!< The number of elements in each task (training data)
     std::vector<int> _start_gen; //!< list of the indexes where each generation starts in _phi
+    const std::string _project_type; //!< The type of projection that should be done during SIS
     const std::string _feature_space_file; //!< File to store information about the selected features
     const std::string _feature_space_summary_file; //!< File to store information about the selected features
 
@@ -126,10 +127,8 @@ public:
 
     /**
-     * @brief Initialize the feature set given a property vector
+     * @brief Initialize the feature space from the allowed operators and the stored property vector
-     *
-     * @param prop The property trying to be learned
      */
-    void initialize_fs(std::string project_type);
+    void initialize_fs();
 
     /**
      * @brief Generate the full feature set from the allowed operators and initial feature set
@@ -221,18 +220,45 @@ public:
      */
     inline int n_rung_generate(){return _n_rung_generate;}
 
-    /**
-     * @brief Generate a new set of features from a single feature
-     * @details Take in the feature and perform all valid algebraic operations on it.
-     *
-     * @param feat The feature to spawn new features from
-     * @param feat_set The feature set to pull features from for combinations
-     * @param feat_ind starting index for the next feature generated
-     * @param l_bound lower bound for the absolute value of the feature
-     * @param u_bound upper bound for the abosulte value of the feature
-     */
-    void generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound=1e-50, double u_bound=1e50);
-
+    #ifdef PARAMETERIZE
+        /**
+         * @brief Generate a new set of features from a single feature
+         * @details Take in the feature and perform all valid algebraic operations on it.
+         *
+         * @param feat The feature to spawn new features from
+         * @param feat_set The feature set to pull features from for combinations
+         * @param feat_ind starting index for the next feature generated
+         * @param optimizer The object used to optimize the parameterized features
+         * @param l_bound lower bound for the absolute value of the feature
+         * @param u_bound upper bound for the absolute value of the feature
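+         *
+         * A minimal usage sketch (assuming an optimizer created as in
+         * generate_feature_space):
+         * @code
+         * std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
+         * std::vector<node_ptr> new_feats;
+         * int feat_ind = _phi.size();
+         * for(auto feat = _phi.begin() + _start_gen.back(); feat != _phi.end(); ++feat)
+         *     generate_new_feats(feat, new_feats, feat_ind, optimizer, _l_bound, _u_bound);
+         * @endcode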
+         */
+        void generate_new_feats(
+            std::vector<node_ptr>::iterator& feat,
+            std::vector<node_ptr>& feat_set,
+            int& feat_ind,
+            std::shared_ptr<NLOptimizer> optimizer,
+            double l_bound=1e-50,
+            double u_bound=1e50
+        );
+    #else
+        /**
+         * @brief Generate a new set of features from a single feature
+         * @details Take in the feature and perform all valid algebraic operations on it.
+         *
+         * @param feat The feature to spawn new features from
+         * @param feat_set The feature set to pull features from for combinations
+         * @param feat_ind starting index for the next feature generated
+         * @param l_bound lower bound for the absolute value of the feature
+         * @param u_bound upper bound for the absolute value of the feature
+         */
+        void generate_new_feats(
+            std::vector<node_ptr>::iterator& feat,
+            std::vector<node_ptr>& feat_set,
+            int& feat_ind,
+            double l_bound=1e-50,
+            double u_bound=1e50
+        );
+    #endif
+
     /**
      * @brief Calculate the SIS Scores for feature generated on the fly
      * @details Create the next rung of features and calculate their projection scores. Only keep those that can be selected by SIS.
diff --git a/src/feature_creation/node/operator_nodes/OperatorNode.hpp b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
index 5f6535031bbdc571c9f9326d61aafee72bca3462..8fbf1cb856d94db028f97f082aec81d63ce67a0f 100644
--- a/src/feature_creation/node/operator_nodes/OperatorNode.hpp
+++ b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
@@ -337,13 +337,14 @@ public:
          */
         virtual std::vector<double> parameters() = 0;
 
+        // DocString: op_node_get_params
         /**
          * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+         * @details Fits the data points from _feats->value_ptr and the property vector stored in the optimizer to get the parameters for the feature
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters for the feature
          */
-        virtual void get_parameters(std::vector<double>& prop) = 0;
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer) = 0;
 
         /**
          * @brief Set the non-linear parameters
@@ -357,7 +358,7 @@ public:
          */
         virtual inline int n_params(int n_cur = 0, int depth = 1)
         {
-            return (depth > nlopt_wrapper::_max_param_depth) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
+            return (depth > nlopt_wrapper::MAX_PARAM_DEPTH) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
         }
 
         /**
@@ -488,33 +489,6 @@ public:
         }
 
         #ifdef PY_BINDINGS
-            // DocString: op_node_param_arr
-            /**
-             * @brief Solve the non-linear optimization to set the parameters
-             * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-             *
-             * @param prop property to fit to get the parameters
-             * @param param_list List describing the parameters to fit
-             */
-            inline void get_parameters(np::ndarray prop)
-            {
-                std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-                get_parameters(prop_vec);
-            }
-
-            // DocString: op_node_param_list
-            /**
-             * @brief Solve the non-linear optimization to set the parameters
-             * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-             *
-             * @param prop property to fit to get the parameters
-             * @param param_list List describing the parameters to fit
-             */
-            inline void get_parameters(py::list prop)
-            {
-                std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-                get_parameters(prop_vec);
-            }
 
             // DocString: op_node_set_param_list
             /**
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
index 6d5b66abfa0965687f5d2caf02340f082ab90a1f..2a6a188c2f678547da69244a5ea3081d7e87359b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
@@ -156,7 +156,8 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters for the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -191,7 +192,7 @@ public:
             return fmt::format(
                 "|{:.10e}*{}{:+15.10e}|",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -209,7 +210,7 @@ public:
             return fmt::format(
                 "\\left(\\left|{:.3e}{}{:+8.3e}\\right|\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
index 58cd97591fc270d73b85f172308d8f4ead2d6b6b..17417f963f6b2a847698bcf5b3cd683ac9603398 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsParamNode)
 
-void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,40 +17,40 @@ void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 AbsParamNode::AbsParamNode()
 {}
 
-AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
-    AbsNode(feat, feat_ind)
+AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+    AbsNode(feat, feat_ind),
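+    // set_bounds pins the scale parameter alpha to _sign_alpha, so only its sign varies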
+    _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
-    AbsNode(feat, feat_ind)
+AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
+    AbsNode(feat, feat_ind),
+    _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
-    AbsNode(feat, feat_ind)
+    AbsNode(feat, feat_ind),
+    _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
 }
 
-void AbsParamNode::get_parameters(std::vector<double>& prop)
+void AbsParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
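+    // Fit twice, once with each sign of the scale parameter alpha (set_bounds
+    // pins alpha to _sign_alpha), and keep whichever fit has the lower residual.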
+    _sign_alpha = 1.0;
+    double min_res = optimizer->optimize_feature_params(this);
     std::vector<double> param_cp(_params);
 
     _sign_alpha = -1.0;
-    if(nlopt_wrapper::optimize_feature_params(d) > min_res)
+    if(optimizer->optimize_feature_params(this) > min_res)
     {
         std::copy_n(param_cp.data(), param_cp.size(), _params.data());
         _sign_alpha = 1.0;
@@ -61,7 +61,7 @@ void AbsNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::abs(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +71,7 @@ void AbsNode::set_value(const double* params, int offset, int depth)
 void AbsNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
     allowed_op_funcs::abs(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,9 +81,8 @@ void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
-
     _feats[0]->set_bounds(lb + 2, ub + 2);
 }
 
@@ -92,7 +91,7 @@ void AbsParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
index ab5ae9afb708edc98b29c96bdf00a4043087fb0c..4c0ec0c9a744143bb5c4421aa6311d70237e7aa1 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
@@ -63,7 +63,7 @@ public:
-     * @param param_list The list of parameters to optimize using non-linear least squares
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -73,7 +73,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    AbsParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    AbsParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -146,7 +146,8 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -177,7 +178,6 @@ public:
      */
     void update_postfix(std::string& cur_expr, bool add_params=true)
     {
-
         std::stringstream postfix;
         postfix << get_postfix_term();
         if(add_params)
@@ -191,6 +191,6 @@ public:
     }
 };
 
-void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
index 4dc4c0e2082ab6ee4a66bbe3a2f527c2d9d98fe2..11f6bfb32388966b3924e8fdddb2d763b637006d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
@@ -167,7 +167,8 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters for the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -201,9 +202,9 @@ public:
         {
             return fmt::format(
                 "|{} - ({:.10e}*{}{:+15.10e})|",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -220,9 +221,9 @@ public:
         {
             return fmt::format(
                 "\\left(\\left|{} - \\left({:.3e}{}{:+8.3e}\\right)\\right|\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
index e2b8350e911739d0357d91434e9852e2fc34434f..438b8f01251effe9d30d8eeeacaae924afe61e41 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsDiffParamNode)
 
-void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<AbsDiffParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<AbsDiffParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
@@ -32,7 +32,7 @@ void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1,
 AbsDiffParamNode::AbsDiffParamNode()
 {}
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop):
+AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     std::map<std::string, int> add_sub_leaves;
@@ -44,7 +44,7 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_in
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     double* params = _params.data();
     double* val_ptr = value_ptr();
@@ -58,11 +58,11 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_in
         throw InvalidFeatureException();
 }
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop):
+AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound):
@@ -71,13 +71,9 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_in
     _params.resize(n_params(), 0.0);
 }
 
-void AbsDiffParamNode::get_parameters(std::vector<double>& prop)
+void AbsDiffParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void AbsDiffNode::set_value(const double* params, int offset, int depth)
@@ -85,8 +81,8 @@ void AbsDiffNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::abs_diff(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -98,15 +94,15 @@ void AbsDiffNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::abs_diff(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
 
 void AbsDiffNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 {
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
index 1414193be0133020260ccbe6ed541d58fa70277b..f3781eb3ff48864a79fbffcbf2d3d58479c4b5aa 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
@@ -59,7 +59,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,8 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -182,6 +183,6 @@ public:
     }
 };
 
-void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
index 47c1a8375742d3e3c0f611d426df5fd273e6f805..3876062fb5d3ae8531d722df0635eeee38f133b6 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
@@ -164,7 +164,8 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters for the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -198,9 +199,9 @@ public:
         {
             return fmt::format(
                 "({} + {:.10e}*{}{:+15.10e})",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -217,9 +218,9 @@ public:
         {
             return fmt::format(
                 "\\left({} + {:.3}*{}{:+8.3e}\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
index 28f4814b8b20bcfc04780e978712a25237e6fdc4..25940d1bab770ff90096a794287a59e9558a6284 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AddParamNode)
 
-void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<AddParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<AddParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
 
@@ -18,21 +18,21 @@ void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 AddParamNode::AddParamNode()
 {}
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop) :
+AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound) :
@@ -41,13 +41,9 @@ AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, doubl
     _params.resize(n_params(), 0.0);
 }
 
-void AddParamNode::get_parameters(std::vector<double>& prop)
+void AddParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void AddNode::set_value(const double* params, int offset, int depth)
@@ -55,8 +51,8 @@ void AddNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::add(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -68,8 +64,8 @@ void AddNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::add(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -79,7 +75,7 @@ void AddNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = 0.0;
     ub[1] = 0.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
index e82661af383f53b3c62166d87256f133a6cb54a7..02533c01581af62dfe3f36f420266d6e6c3f6495 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    AddParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,8 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -183,6 +184,6 @@ public:
     }
 };
 
-void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
index 28a538d1a2ac30d392d7a2c45c2c9379176611bc..6f6953182f1fa2c842c86694b9f04a865ac03bb3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
@@ -154,7 +154,8 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters for the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(({:.10e}*{}{:+15.10e})^3)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\left({:.3e}{}{:+8.3e}\\right)^3\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
index 50080f44b2556b66b8213ed0d47b28107bab07d6..1179f0f76e531ab300ef9caacbca8db0b8337698 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CbParamNode)
 
-void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<CbParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<CbParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,23 +17,23 @@ void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& f
 CbParamNode::CbParamNode()
 {}
 
-CbParamNode::CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+CbParamNode::CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 
-    get_parameters(prop);
+    get_parameters(optimizer);
     set_value();
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-CbParamNode::CbParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+CbParamNode::CbParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CbNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 
-    get_parameters(prop);
+    get_parameters(optimizer);
     set_value();
 }
 
@@ -43,20 +43,16 @@ CbParamNode::CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_b
     _params.resize(n_params(), 0.0);
 }
 
-void CbParamNode::get_parameters(std::vector<double>& prop)
+void CbParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void CbNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::cb(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -67,7 +63,7 @@ void CbNode::set_value(const double* params, int offset, int depth)
 void CbNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::cb(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::cb(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -77,7 +73,7 @@ void CbNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
index 8c029ae54068a7f8c7218da3af709ef2dde0ad3d..2d56e25eafdd40b3782089bf7085858993fb30c1 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    CbParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    CbParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    CbParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,8 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters for the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
index a19a23d450b146fb2b0d28f40821ad1482a369f0..8d12cc062865ba89a98b64b0aecba41a7839935f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
@@ -154,7 +154,8 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters for the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(cbrt({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\sqrt[3]{{ {:.3e}{}{:+8.3e} }}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
index 50a7436d991705937bfe5ab669cf0dac2f47702c..1c59e45e5669a25797e6518a4910679992bcbf83 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CbrtParamNode)
 
-void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<CbrtParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<CbrtParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,23 +17,23 @@ void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 CbrtParamNode::CbrtParamNode()
 {}
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CbrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -43,17 +43,15 @@ CbrtParamNode::CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double
     _params.resize(n_params(), 0.0);
 }
 
-void CbrtParamNode::get_parameters(std::vector<double>& prop)
+void CbrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d, true);
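+    // First fit with the sign of the leading scale parameter fixed to alpha = +1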
+    _sign_alpha = 1.0;
+    double min_res = optimizer->optimize_feature_params(this, true);
     std::vector<double> param_cp(_params);
-
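+    // Refit with alpha = -1 and keep whichever sign yielded the lower residual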
     _sign_alpha = -1.0;
-    if(nlopt_wrapper::optimize_feature_params(d, true) > min_res)
+    if(optimizer->optimize_feature_params(this, true) > min_res)
     {
         std::copy_n(param_cp.data(), param_cp.size(), _params.data());
         _sign_alpha = 1.0;
@@ -64,7 +60,8 @@ void CbrtNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
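+    // Recurse with parameterized values only down to MAX_PARAM_DEPTH; deeper features use their unparameterized values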
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::cbrt(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -75,7 +71,7 @@ void CbrtNode::set_value(const double* params, int offset, int depth)
 void CbrtNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::cbrt(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::cbrt(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -85,7 +81,7 @@ void CbrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2);
@@ -96,7 +92,7 @@ void CbrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
index a8a24bbadcfdf1f2c7022596fb2c8d77cf32f3c0..424a292c81fe069178064ef9136f5d09f9ea467e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    CbrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -71,7 +71,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CbrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    CbrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -189,6 +190,6 @@ public:
     }
 };
 
-void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
index 8f16af077e58862dacf8872ec434426671946cca..f2ed8157d33a91ffd81901f811579f7294d4b6e1 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(cos({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\cos{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
index 5f3e8f34d8acd6b49c77940c2870449f3b45678e..a9e9e9265f31bb4795b2c0a2ad2345f6d3c50cc8 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(CosParamNode)
 
-void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         return;
 
-    node_ptr new_feat = std::make_shared<CosParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<CosParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 CosParamNode::CosParamNode()
 {}
 
-CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     CosNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-CosParamNode::CosParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+CosParamNode::CosParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     CosNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ CosParamNode::CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void CosParamNode::get_parameters(std::vector<double>& prop)
+void CosParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void CosNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::cos(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void CosNode::set_value(const double* params, int offset, int depth)
 void CosNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::cos(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::cos(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
index bac3a7b1a1134ea5584ccd2b94a88dba42572bd2..fb6ace4cca63b8dc4df2c514616a58806e2d3239 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
@@ -61,7 +61,7 @@ public:
-     * @param param_list The list of parameters to optimize using non-linear least squares
-     * @param prop The property to fit to
+     * @param u_bound Maximum absolute value allowed for the feature.
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    CosParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -71,7 +71,7 @@ public:
      * @param feat_ind Index of the new feature
      * @param prop The property to fit to
      */
-    CosParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    CosParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
index 9399d2d14223412df2e0d0c3278b1df1ca4882c4..8f862c7071fd6b13352edb8072cd62a6104b61c3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
@@ -164,7 +164,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -198,9 +199,9 @@ public:
         {
             return fmt::format(
                 "({} / ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -217,9 +218,9 @@ public:
         {
             return fmt::format(
                 "\\left(\\frac{{ {} }}{{ {:.3e}*{}{:+8.3e} }} \\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
index 12b02bb2c73cddeb92315f6716dddb746a2f1f3b..7c81fd0cf8654f1a1ac90250d33d2f650b083387 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
@@ -2,13 +2,13 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(DivParamNode)
 
-void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
     if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
         return;
 
-    node_ptr new_feat = std::make_shared<DivParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<DivParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -20,26 +20,26 @@ void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 DivParamNode::DivParamNode()
 {}
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     DivNode(feat_1, feat_2, feat_ind)
 {
      if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     set_value();
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop) :
+DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     DivNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
 
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound) :
@@ -48,13 +48,9 @@ DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, doubl
     _params.resize(n_params(), 0.0);
 }
 
-void DivParamNode::get_parameters(std::vector<double>& prop)
+void DivParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d, rung() > 1);
+    optimizer->optimize_feature_params(this, rung() > 1);
 }
 
 void DivNode::set_value(const double* params, int offset, int depth)
@@ -62,8 +58,9 @@ void DivNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
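+    // params layout: [alpha, beta, <_feats[1] params>, <_feats[0] params>]; each child reads its own slice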
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::div(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -75,8 +71,8 @@ void DivNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::div(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -86,7 +82,7 @@ void DivNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
index 4ba1fb4d8781bb2439d510a2b36f7e26abdbd603..35f5a89a2731b780aee0b5268b9fb7252aa8f742 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    DivParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -183,6 +184,6 @@ public:
     }
 };
 
-void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
index d6e5aa80a7ce8d6895901941720c1d2ebdf7f297..d00bf403e791bad32d91e4eee01041346a0c69cd 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(exp({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\exp{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
index 7c43d7db6d87208f73fcc994c938030f702d0fb9..659118dbfa5df4c1bb4a4490486f3c757648305f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(ExpParamNode)
 
-void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         return;
 
-    node_ptr new_feat = std::make_shared<ExpParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<ExpParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 ExpParamNode::ExpParamNode()
 {}
 
-ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     ExpNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     ExpNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ ExpParamNode::ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void ExpParamNode::get_parameters(std::vector<double>& prop)
+void ExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this, true);
 }
 
 void ExpNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::exp(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void ExpNode::set_value(const double* params, int offset, int depth)
 void ExpNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::exp(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::exp(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -83,7 +79,7 @@ void ExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(lb - from_parent) = 1.0;
     *(ub - from_parent) = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
index 0beb38e9e29c03dc55dbba05d615479801801eae..6fae956f1f742469a9051fdf788413e021eb47d0 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    ExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    ExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    ExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
index cdcf4a1031ef5a10b901a18f7405a80fb74479ff..642a360e5957fc9a76bc1dcfd7368f410d87d4e9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
@@ -150,7 +150,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -184,7 +185,7 @@ public:
             return fmt::format(
                 "(1.0 / ({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -202,7 +203,7 @@ public:
             return fmt::format(
                 "\\left(\\frac{{1}}{{ {:.3e}{}{:+8.3e} }}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
index a5da2982bc4b2affe70a1104f525173106b86349..9f923fc33fcd74ba22cb9332c7ff5b935eed8556 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(InvParamNode)
 
-void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV))
         return;
 
-    node_ptr new_feat = std::make_shared<InvParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<InvParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 InvParamNode::InvParamNode()
 {}
 
-InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     InvNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-InvParamNode::InvParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+InvParamNode::InvParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     InvNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ InvParamNode::InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void InvParamNode::get_parameters(std::vector<double>& prop)
+void InvParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d, rung() > 1);
+    optimizer->optimize_feature_params(this, rung() > 1);
 }
 
 void InvNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::inv(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void InvNode::set_value(const double* params, int offset, int depth)
 void InvNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
     allowed_op_funcs::inv(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,7 +77,7 @@ void InvNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
index b8706b7126415013be17979112291cf32e2495c2..c926e57e2e6fd2db4cfa472492bb1a08221b95ae 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    InvParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    InvParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    InvParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
index 061ebd568803a65cc9117fa8c75cbf7abb88f8dd..0d515e89235b363d2f21098dd0079649a2a3b153 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -188,7 +189,7 @@ public:
             return fmt::format(
                 "(ln({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -206,7 +207,7 @@ public:
             return fmt::format(
                 "\\left(\\ln{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
index 0b5e462a51da8ae3cef65f027fb23cd04f00aff9..cfc6a5d986b17769a41f4bbc6e6f3a4c96f26863 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(LogParamNode)
 
-void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
         return;
 
-    node_ptr new_feat = std::make_shared<LogParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<LogParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 LogParamNode::LogParamNode()
 {}
 
-LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     LogNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-LogParamNode::LogParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+LogParamNode::LogParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     LogNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ LogParamNode::LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void LogParamNode::get_parameters(std::vector<double>& prop)
+void LogParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void LogNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::log(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void LogNode::set_value(const double* params, int offset, int depth)
 void LogNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::log(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::log(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,7 +77,7 @@ void LogNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(lb - from_parent + 1) = 0.0;
     *(ub - from_parent + 1) = 0.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
index 53486f81b96c3e69bcd45ae95f4f7561451eca88..0d75c274082194debcb6c0fba8394b8781f0e4c9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    LogParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    LogParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    LogParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
index 1b47135df8e45a9816943b4e8045cfc74dde636e..898aae5608f78e2202484ffa3f546aef07d790e0 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
@@ -165,7 +165,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -199,9 +200,9 @@ public:
         {
             return fmt::format(
                 "({} * ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -218,9 +219,9 @@ public:
         {
             return fmt::format(
                 "\\left({} * \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
index 51f5212203aa7fb7d4bd2833c9f6020a39b18c5a..f83edc462b455db9a09eb8f9d50d351f52c9f644 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(MultParamNode)
 
-void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<MultParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<MultParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,11 +17,11 @@ void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, no
 MultParamNode::MultParamNode()
 {}
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop):
+MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
@@ -33,20 +33,16 @@ MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, dou
     _params.resize(n_params(), 0.0);
 }
 
-MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop):
+MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
     MultNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
-void MultParamNode::get_parameters(std::vector<double>& prop)
+void MultParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void MultNode::set_value(const double* params, int offset, int depth)
@@ -54,8 +50,8 @@ void MultNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
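+    // Parameter packing (assumed layout, inferred from the offsets below): [scale, bias, params of _feats[1], params of _feats[0]]; recurse only while depth < MAX_PARAM_DEPTH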
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::mult(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -67,8 +63,8 @@ void MultNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::mult(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -78,7 +74,7 @@ void MultNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
index 55f5ecd16881dd3f35d026b31c9220667514b76d..a3bc169cb91a469d5ff7db72899e50414247e508 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    MultParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
index 132b40462457d2c8e03236f0c16ce9d1d97f4581..9f2b0d3f724293af79ecaff8288b2e6a3e61c718 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(exp(-1.0 * {:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\exp{{ \\left(-\\left({:.3e}{}{:+8.3e} \\right)\\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
index 7fa5cd2ce7e252100cb07874008a369d91547fae..b58b87aa806b4ba7c29f745f960b8150c0f256c5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(NegExpParamNode)
 
-void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         return;
 
-    node_ptr new_feat = std::make_shared<NegExpParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<NegExpParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, in
 NegExpParamNode::NegExpParamNode()
 {}
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     NegExpNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
         throw InvalidFeatureException();
 
     _params.resize(n_params(),  0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     NegExpNode(feat, feat_ind)
 {
     _params.resize(n_params(),  0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ NegExpParamNode::NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, do
     _params.resize(n_params(),  0.0);
 }
 
-void NegExpParamNode::get_parameters(std::vector<double>& prop)
+void NegExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this, true);
 }
 
 void NegExpNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::neg_exp(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void NegExpNode::set_value(const double* params, int offset, int depth)
 void NegExpNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::neg_exp(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::neg_exp(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -83,7 +79,7 @@ void NegExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(lb - from_parent) = 1.0;
     *(ub - from_parent) = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
index 8e4396882ab6d46e9cff894c3a642beccc4b5ce3..920c1132f04337f9a0d5a954205a42dabc2f1e57 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    NegExpParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    NegExpParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    NegExpParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
index c11f4af7700ab30b57c4f09e3251d2305bf2ee9b..fb7564caa7f9a3a231b3181c059123c8fe35d729 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
@@ -2,14 +2,14 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SinParamNode)
 
-void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         return;
 
-    node_ptr new_feat = std::make_shared<SinParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SinParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -21,24 +21,24 @@ void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 SinParamNode::SinParamNode()
 {}
 
-SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SinNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
         throw InvalidFeatureException();
 
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SinParamNode::SinParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SinParamNode::SinParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SinNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -47,20 +47,16 @@ SinParamNode::SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u
     _params.resize(n_params(), 0.0);
 }
 
-void SinParamNode::get_parameters(std::vector<double>& prop)
+void SinParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SinNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sin(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +67,7 @@ void SinNode::set_value(const double* params, int offset, int depth)
 void SinNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sin(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sin(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -81,7 +77,7 @@ void SinNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = -1.0 * M_PI;
     ub[1] = M_PI;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
index aab0ed154374014b78ce1d0e47c29a0f8f33a2a0..06df38e42453ddb53446dcb524259a5ee282d9ea 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SinParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SinParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SinParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
index 5ae01ef6fcd4deda3cfed6bb3e9e223cbb9a67cd..1097729604434690796d98515c245a6de2777563 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(sin({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\sin{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
index 2a1943418423a7cb9a7bf3bc6ae3238af3e3672a..552f71ab368a732e1c38e6c94ac4575cb36eda50 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SixPowParamNode)
 
-void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SixPowParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SixPowParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,21 +17,21 @@ void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, in
 SixPowParamNode::SixPowParamNode()
 {}
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SixPowNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -40,20 +40,16 @@ SixPowParamNode::SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, do
     _params.resize(n_params(), 0.0);
 }
 
-void SixPowParamNode::get_parameters(std::vector<double>& prop)
+void SixPowParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SixPowNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sixth_pow(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -64,7 +60,7 @@ void SixPowNode::set_value(const double* params, int offset, int depth)
 void SixPowNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sixth_pow(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sixth_pow(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -74,7 +70,7 @@ void SixPowNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
index d7bd87d2960469596cd71c8d2347b989698cf453..296f395098293f159b080a3e44be4f9999d8766f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
@@ -61,7 +61,6 @@ public:
-     * @param param_list The list of parameters to optimize using non-linear least squares
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SixPowParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -71,7 +71,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SixPowParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SixPowParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -144,7 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -181,6 +182,6 @@ public:
     }
 };
 
-void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
index 8214b8394b96f400db8002a066b7483a2b142e6a..2802c95a5e7571566f3947bd83fa912fab3df1e1 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -190,7 +191,7 @@ public:
             return fmt::format(
                 "(({:.10e}*{}{:+15.10e})^6)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -208,7 +209,7 @@ public:
             return fmt::format(
                 "\\left(\\left({:.3e}{}{:+8.3e}\\right)^6\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
index b5237be26b5da93b2f524954566422e51bf4e69e..cb07323461d9b59abdfa2daae7044f3bbe222e23 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SqParamNode)
 
-void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SqParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SqParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,21 +17,21 @@ void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& f
 SqParamNode::SqParamNode()
 {}
 
-SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SqParamNode::SqParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SqParamNode::SqParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SqNode(feat, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -40,21 +40,16 @@ SqParamNode::SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_b
     _params.resize(n_params(), 0.0);
 }
 
-void SqParamNode::get_parameters(std::vector<double>& prop)
+void SqParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-    double minf;
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SqNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sq(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -65,7 +60,7 @@ void SqNode::set_value(const double* params, int offset, int depth)
 void SqNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sq(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sq(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -75,7 +70,7 @@ void SqNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
index a387ecba377b116bb342f0dbffebe634bda22498..ce3f7a2e35adbada868856e056c6e5710b3369d0 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
@@ -60,7 +60,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SqParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -70,7 +70,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SqParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SqParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -143,7 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -180,6 +181,6 @@ public:
     }
 };
 
-void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
index 86c4e4198d9abff86d5b417bdd9fb7e56735f76a..9e311f897ffb0a3cd4641c4fc6ed37f2c5688952 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
@@ -154,7 +154,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(({:.10e}*{}{:+15.10e})^2)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\left({:.3e}{}{:+8.3e}\\right)^2\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
index dada6dd339d4043523e3ecac64e3898c1bb3764a..9120db2af99680799bd0f957ba09913dc1e8bd97 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SqrtParamNode)
 
-void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SqrtParamNode>(feat, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SqrtParamNode>(feat, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,23 +17,23 @@ void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
 SqrtParamNode::SqrtParamNode()
 {}
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
+SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SqrtNode(feat, feat_ind),
     _sign_alpha(1.0)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
@@ -43,17 +43,14 @@ SqrtParamNode::SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double
     _params.resize(n_params(), 0.0);
 }
 
-void SqrtParamNode::get_parameters(std::vector<double>& prop)
+void SqrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    double min_res = optimizer->optimize_feature_params(this);
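+    // First pass fits the parameters with _sign_alpha = +1; below we retry with -1 and keep whichever sign gives the smaller residual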
     std::vector<double> param_cp(_params);
 
     _sign_alpha = -1.0;
-    if(nlopt_wrapper::optimize_feature_params(d) > min_res)
+    if(optimizer->optimize_feature_params(this) > min_res)
     {
         std::copy_n(param_cp.data(), param_cp.size(), _params.data());
         _sign_alpha = 1.0;
@@ -64,7 +60,7 @@ void SqrtNode::set_value(const double* params, int offset, int depth)
 {
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
 
     if(_selected && is_root)
         allowed_op_funcs::sqrt(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -75,7 +71,7 @@ void SqrtNode::set_value(const double* params, int offset, int depth)
 void SqrtNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
 
-    allowed_op_funcs::sqrt(_n_test_samp, _feats[0]->test_value_ptr(params + 2, offset + 2), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
+    allowed_op_funcs::sqrt(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -85,7 +81,7 @@ void SqrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[0] = 1.0;
     ub[0] = 1.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
@@ -96,7 +92,7 @@ void SqrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
     lb[0] = _sign_alpha;
     ub[0] = _sign_alpha;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
index ef6933295188353d8b1c7370df535fe3b9e7c967..173b09e97737d7159697dcee07b6c57e097898a5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
@@ -62,7 +62,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SqrtParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SqrtParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
+    SqrtParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -190,6 +191,6 @@ public:
     }
 };
 
-void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
index 6d347f92871c15f6442cf120da49e5df2ce32386..aea3fd135838a5686009a27cb09651d8c406d834 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
@@ -155,7 +155,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -189,7 +190,7 @@ public:
             return fmt::format(
                 "(sqrt({:.10e}*{}{:+15.10e}))",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
                 params[1]
             );
         }
@@ -207,7 +208,7 @@ public:
             return fmt::format(
                 "\\left(\\sqrt{{ {:.3e}{}{:+8.3e} }}\\right)",
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[1]
             );
         }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
index 334eff516901c046e8744726f04917b022638b84..6ba6d443d4b71c8df3bf7cbab3cfced5bac467e1 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
@@ -2,10 +2,10 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(SubParamNode)
 
-void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
+void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
 {
     ++feat_ind;
-    node_ptr new_feat = std::make_shared<SubParamNode>(feat_1, feat_2, feat_ind, prop);
+    node_ptr new_feat = std::make_shared<SubParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     new_feat->set_value();
     if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,21 +17,21 @@ void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 SubParamNode::SubParamNode()
 {}
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
+SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 
     if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
         throw InvalidFeatureException();
 }
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop) :
+SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-    get_parameters(prop);
+    get_parameters(optimizer);
 }
 
 SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound) :
@@ -40,13 +40,9 @@ SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, doubl
     _params.resize(n_params(), 0.0);
 }
 
-void SubParamNode::get_parameters(std::vector<double>& prop)
+void SubParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
-    nlopt_wrapper::feat_data d;
-    d._feat = this;
-    d._prop = prop.data();
-
-    double min_res = nlopt_wrapper::optimize_feature_params(d);
+    optimizer->optimize_feature_params(this);
 }
 
 void SubNode::set_value(const double* params, int offset, int depth)
@@ -54,8 +50,8 @@ void SubNode::set_value(const double* params, int offset, int depth)
     bool is_root = (offset == -1);
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->value_ptr(offset + 1);
 
     if(_selected && is_root)
         allowed_op_funcs::sub(_n_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -67,8 +63,8 @@ void SubNode::set_test_value(const double* params, int offset, int depth)
 {
     offset = (offset == -1) ? rung() : offset;
 
-    double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
-    double* vp_1 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
+    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
+    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, offset + 1, depth + 1) : _feats[1]->test_value_ptr(offset + 1);
 
     allowed_op_funcs::sub(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
 }
@@ -78,7 +74,7 @@ void SubNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = 0.0;
     ub[1] = 0.0;
 
-    if(depth >= nlopt_wrapper::_max_param_depth)
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
         return;
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
index c4144e08205da40624ad57ffe64bf821db66eb26..5038b556d9e94ea3786c244f2ca8629c96a7e956 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
@@ -61,7 +61,7 @@ public:
      * @param u_bound Maximum absolute value allowed for the feature.
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -72,7 +72,7 @@ public:
      * @param feat_ind Index of the new feature
-     * @param prop The property to fit to
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::vector<double>& prop);
+    SubParamNode(node_ptr feat_1, node_ptr feat_2, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Constructor
@@ -145,7 +145,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer The optimizer used to find the optimal parameters of the feature
      */
-    void get_parameters(std::vector<double>& prop);
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -183,6 +184,6 @@ public:
     }
 };
 
-void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
+void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
index 5326d904a03676c3cf766f7cc222307ff3ca503c..50191586d2dc66ca5248c80ce5e6b5d48c784931 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
@@ -165,7 +165,7 @@ public:
          *
-         * @param prop property to fit to get the parameters
+         * @param optimizer The optimizer used to find the optimal parameters of the feature
          */
-        virtual void get_parameters(std::vector<double>& prop){return;}
+        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
 
         /**
          * @brief Set the non-linear parameters
@@ -199,9 +200,9 @@ public:
         {
             return fmt::format(
                 "({} - ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
                 params[1]
             );
         }
@@ -218,9 +219,9 @@ public:
         {
             return fmt::format(
                 "\\left({} - \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
                 params[0],
-                (depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
                 params[1]
             );
         }
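
The renamed global nlopt_wrapper::MAX_PARAM_DEPTH caps how deep the parameterized expression printers recurse: below the cap a subtree prints with its fitted parameters, past it the plain expression is reused. A sketch of the pattern the two overloads above share (illustrative excerpt, not new code):

    // Hypothetical excerpt mirroring expr()/get_latex_expr():
    std::string rhs = (depth < nlopt_wrapper::MAX_PARAM_DEPTH)
        ? _feats[1]->expr(params + 2, depth + 1)  // parameterized: consume this node's params
        : _feats[1]->expr();                      // past the cap: unparameterized expression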
diff --git a/src/feature_creation/node/operator_nodes/allowed_ops.hpp b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
index dc07904894733b1f708caa170e206464cde23c15..740f99395e99024db6b7032c477c204b3b5ff378 100644
--- a/src/feature_creation/node/operator_nodes/allowed_ops.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
@@ -32,8 +32,8 @@ typedef std::function<void(std::vector<node_ptr>&, node_ptr, int&, double, doubl
 typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, int&, double, double)> bin_op_node_gen;
 
 #ifdef PARAMETERIZE
-    typedef std::function<void(std::vector<node_ptr>&, node_ptr, int&, double, double, std::vector<double>&)> un_param_op_node_gen;
-    typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, int&, double, double, std::vector<double>&)> bin_param_op_node_gen;
+    typedef std::function<void(std::vector<node_ptr>&, node_ptr, int&, double, double, std::shared_ptr<NLOptimizer>)> un_param_op_node_gen;
+    typedef std::function<void(std::vector<node_ptr>&, node_ptr, node_ptr, int&, double, double, std::shared_ptr<NLOptimizer>)> bin_param_op_node_gen;
 #endif
 
 namespace allowed_op_maps
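
The updated typedefs simply append the optimizer argument to the old generator signatures, so a converted generator binds directly; for example (a sketch using the generateSubParamNode declaration above):

    // The converted generator matches the new typedef exactly.
    bin_param_op_node_gen sub_gen = generateSubParamNode;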
diff --git a/src/nl_opt/NLOptWrapper.cpp b/src/nl_opt/NLOptWrapper.cpp
index c88ae1201fa495dc88bee4072290af844e255855..fb68beae4fec0c373a848b65fea01b5d6ab8cb5b 100644
--- a/src/nl_opt/NLOptWrapper.cpp
+++ b/src/nl_opt/NLOptWrapper.cpp
@@ -1,78 +1,62 @@
 #include <nl_opt/NLOptWrapper.hpp>
-
-double (*nlopt_wrapper::_objective)(unsigned int n, const double* p, double* grad, void* data) = nlopt_wrapper::objective_reg;
-
-std::vector<double> nlopt_wrapper::_feature_gradient;
-std::vector<double> nlopt_wrapper::_zeros;
-std::vector<int> nlopt_wrapper::_task_sizes;
-std::vector<double> nlopt_wrapper::_residuals;
-std::vector<double> nlopt_wrapper::_work;
-std::vector<double> nlopt_wrapper::_a_copy;
-std::vector<double> nlopt_wrapper::_prop_copy;
-
-int nlopt_wrapper::_max_param_depth;
-
-nlopt::algorithm nlopt_wrapper::_local_opt_alg = nlopt::LD_VAR2;
-double nlopt_wrapper::_cauchy_scaling = 0.5 * 0.5;
-int nlopt_wrapper::_n_samp = 0;
-
-ConvexHull1D nlopt_wrapper::_convex_hull;
-
-void nlopt_wrapper::setup_data(std::vector<int> task_sizes, int max_dim, int n_rung, int max_param_depth)
+int nlopt_wrapper::MAX_PARAM_DEPTH = -1;
+NLOptimizer::NLOptimizer(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, nlopt::func objective, int max_param_depth, bool reset_max_param_depth) :
+    _objective(objective),
+    _a(*std::max_element(task_sizes.begin(), task_sizes.end()) * 2),
+    _prop(prop),
+    _prop_copy(prop),
+    _work(prop.size(), 0.0),
+    _zeros(prop.size(), 0.0),
+    _task_sizes(task_sizes),
+    _n_samp(std::accumulate(task_sizes.begin(), task_sizes.end(), 0)),
+    _n_rung(n_rung),
+    _max_params(2 * task_sizes.size()),
+    _max_param_depth(std::min(n_rung, max_param_depth)),
+    _local_opt_alg(nlopt::LD_VAR2)
 {
-    if(max_param_depth == -1)
+    if(prop.size() != _n_samp)
+        throw std::logic_error("Property vector size (" + std::to_string(_prop.size()) + ") and number of samples (" + std::to_string(_n_samp) + ") are not the same");
+
+    if(_max_param_depth == -1)
         _max_param_depth = n_rung;
-    else
-        _max_param_depth = std::min(n_rung, max_param_depth);
-    _task_sizes = task_sizes;
-    _zeros.resize(50, 0.0);
-    _n_samp = std::accumulate(task_sizes.begin(), task_sizes.end(), 0.0);
 
-    int max_params = 2 * task_sizes.size();
-    for(int rr = 1; rr <= n_rung; ++rr)
-        max_params += std::pow(2, rr);
+    if(reset_max_param_depth || (nlopt_wrapper::MAX_PARAM_DEPTH == -1))
+        nlopt_wrapper::MAX_PARAM_DEPTH = _max_param_depth;
+    else if(nlopt_wrapper::MAX_PARAM_DEPTH != _max_param_depth)
+        throw std::logic_error("_max_param_depth (" + std::to_string(_max_param_depth) + ") is not the same as the global one (" + std::to_string(nlopt_wrapper::MAX_PARAM_DEPTH) + ").");
 
-    #pragma omp parallel
-    {
-        _work.resize(_n_samp * max_dim, 0.0);
-        _a_copy.resize(_n_samp * 2, 0.0);
-        _residuals.resize(_n_samp, 0.0);
-        _feature_gradient.resize(max_params * _n_samp, 0.0);
-        _prop_copy.resize(_n_samp, 0.0);
-    }
+    for(int rr = 1; rr <= _max_param_depth; ++rr)
+        _max_params += std::pow(2, rr);
 }
 
-void nlopt_wrapper::set_objective(std::string calc_type, double* prop, const std::vector<int> sizes, int n_rung, int max_param_depth)
+NLOptimizerClassification::NLOptimizerClassification(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, bool reset_max_param_depth) :
+    NLOptimizer(task_sizes, prop, n_rung, nlopt_wrapper::objective_class, max_param_depth, reset_max_param_depth)
 {
-    if(calc_type.compare("classification") == 0)
-    {
-        #pragma omp parallel
-        _convex_hull.initialize_prop(sizes, prop);
-        _objective = objective_class;
-        _local_opt_alg = nlopt::LN_SBPLX;
-        setup_data(sizes, 1, 0, max_param_depth);
-    }
-    else if(calc_type.compare("regression") == 0)
-    {
-        _objective = objective_reg;
-        setup_data(sizes, 1, n_rung, max_param_depth);
-    }
-    else if(calc_type.compare("log_regression") == 0)
-    {
-        _objective = objective_log_reg;
-        setup_data(sizes, 1, n_rung, max_param_depth);
-    }
-    else
-    {
-        throw std::logic_error("projection type can not determined");
-    }
+    _convex_hull = std::make_shared<ConvexHull1D>(task_sizes, _prop.data());
+    _local_opt_alg = nlopt::LN_SBPLX;
 }
 
-double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
+NLOptimizerRegression::NLOptimizerRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, double cauchy_scaling, bool log_reg, bool reset_max_param_depth) :
+    NLOptimizer(task_sizes, prop, n_rung, log_reg ? nlopt_wrapper::objective_log_reg : nlopt_wrapper::objective_reg, max_param_depth, reset_max_param_depth),
+    _feature_gradient(_max_params * _n_samp, 0.0),
+    _residuals(_n_samp, 0.0),
+    _cauchy_scaling(cauchy_scaling * cauchy_scaling)
+{}
+
+NLOptimizerLogRegression::NLOptimizerLogRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, double cauchy_scaling, bool reset_max_param_depth) :
+    NLOptimizerRegression(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, true, reset_max_param_depth)
+{}
+
+double NLOptimizer::optimize_feature_params(Node* feat, bool use_simplex)
 {
+    nlopt_wrapper::feat_data data;
+    data._feat = feat;
+    data._prop = _prop.data();
+    data._optimizer = this;
+
     double minf = 0.0;
-    std::vector<double> params(data._feat->parameters().size() + 2 * _task_sizes.size(), 1.0);
-    std::vector<double> params_final(data._feat->parameters().size(), 1.0);
+    std::vector<double> params(feat->parameters().size() + 2 * _task_sizes.size(), 1.0);
+    std::vector<double> params_final(feat->parameters().size(), 1.0);
 
     dcopy_(params.size() / 2, _zeros.data(), 1, &params[1], 2);
     dcopy_(params_final.size() / 2, _zeros.data(), 1, &params_final[1], 2);
@@ -80,15 +64,16 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
     std::vector<double> lb_global(params.size(), -1e2);
     std::vector<double> ub_global(params.size(),  1e2);
 
-    data._feat->set_bounds(lb_global.data() + 2 * _task_sizes.size(), ub_global.data() + 2 * _task_sizes.size());
+    feat->set_bounds(lb_global.data() + 2 * _task_sizes.size(), ub_global.data() + 2 * _task_sizes.size());
 
     bool scale_b = (lb_global[_task_sizes.size() * 2 - 2] != ub_global[_task_sizes.size() * 2 - 2]);
     bool scale_c = (lb_global[_task_sizes.size() * 2 - 1] != ub_global[_task_sizes.size() * 2 - 1]);
     int n_dim = scale_b + scale_c;
     int start = 0;
 
-    double* val_ptr = data._feat->value_ptr(params_final.data());
-    std::copy_n(data._prop, _n_samp, _prop_copy.data());
+    double* val_ptr = feat->value_ptr(params_final.data());
+    std::copy_n(_prop.data(), _n_samp, _prop_copy.data());
+
     for(int tt = 0; tt < _task_sizes.size(); ++tt)
     {
         lb_global[tt * 2] = lb_global[_task_sizes.size() * 2 - 2];
@@ -97,12 +82,12 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
         ub_global[tt * 2] = ub_global[_task_sizes.size() * 2 - 2];
         ub_global[tt * 2 + 1] = ub_global[_task_sizes.size() * 2 - 1];
 
-        std::fill_n(_a_copy.data(), _a_copy.size(), 1.0);
+        std::fill_n(_a.data(), _a.size(), 1.0);
         if(scale_b)
-            std::copy_n(val_ptr + start, _task_sizes[tt], _a_copy.data());
+            std::copy_n(val_ptr + start, _task_sizes[tt], _a.data());
 
         int info = 0;
-        dgels_('N', _task_sizes[tt], n_dim, 1, _a_copy.data(), _task_sizes[tt], &_prop_copy[start], _task_sizes[tt], _work.data(), _work.size(), &info);
+        dgels_('N', _task_sizes[tt], n_dim, 1, _a.data(), _task_sizes[tt], &_prop_copy[start], _task_sizes[tt], _work.data(), _work.size(), &info);
         if(info == 0)
         {
             params[tt * 2] = scale_b * _prop_copy[start] + (!scale_b);
@@ -110,15 +95,29 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
         }
         start += _task_sizes[tt];
     }
+    std::transform(
+        lb_global.begin(),
+        lb_global.end(),
+        params.begin(),
+        params.begin(),
+        [](double lb, double p){return p < lb ? lb : p;}
+    );
+
+    std::transform(
+        ub_global.begin(),
+        ub_global.end(),
+        params.begin(),
+        params.begin(),
+        [](double ub, double p){return p > ub ? ub : p;}
+    );
 
     nlopt::opt opt_global(nlopt::GN_ISRES, params.size());
-    opt_global.set_min_objective(nlopt_wrapper::_objective, &data);
+    opt_global.set_min_objective(_objective, &data);
     opt_global.set_maxeval(2500);
     opt_global.set_xtol_rel(1e-2);
     opt_global.set_lower_bounds(lb_global);
     opt_global.set_upper_bounds(ub_global);
 
-
     nlopt::opt opt_local((use_simplex ? nlopt::LN_SBPLX : _local_opt_alg), params.size());
     opt_local.set_min_objective(_objective, &data);
     opt_local.set_maxeval(2500);
@@ -145,6 +144,211 @@ double nlopt_wrapper::optimize_feature_params(feat_data data, bool use_simplex)
     {
         minf = HUGE_VAL;
     }
-    data._feat->set_parameters(params_final);
+    feat->set_parameters(params_final);
     return !std::isnan(minf) ? minf : std::numeric_limits<double>::infinity();
 }
+
+double nlopt_wrapper::objective_class(unsigned int n, const double* p, double* grad, void* data)
+{
+    feat_data* d = (feat_data*) data;
+    return d->_optimizer->convex_hull()->overlap_1d(d->_feat->value_ptr(p));
+}
+
+double nlopt_wrapper::objective_reg(unsigned int n, const double* p, double* grad, void* data)
+{
+    feat_data* d = (feat_data*) data;
+    double* val_ptr = d->_feat->value_ptr(p + 2 * d->_optimizer->task_sizes().size());
+
+    int start = 0;
+    std::fill_n(d->_optimizer->feature_gradient(0), n * d->_optimizer->n_samp(), 0.0);
+    for(int tt = 0; tt < d->_optimizer->task_sizes().size(); ++tt)
+    {
+        // Calculate the residual
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->residuals(start),
+            [p, tt, d](double vp, double prop){
+                return d->_optimizer->cauchy_scaling() * std::log(1 + std::pow(prop - (vp * p[2*tt] + p[2*tt + 1]), 2.0) / d->_optimizer->cauchy_scaling()) / d->_optimizer->n_samp();
+            }
+        );
+
+        // Calculate the base of the gradient for each step:
+
+        // Contribution to the derivative from (p - (\alpha_task * feat_val + a_task) )^2
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [p, tt](double vp, double prop){
+                return prop - (vp * p[2*tt] + p[2*tt + 1]);
+            }
+        );
+
+        // Contribution from log(1 + s/a^2)
+        std::transform(
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [d](double s){
+                return -2.0 / (1.0 + std::pow(s, 2.0) / d->_optimizer->cauchy_scaling()) * s;
+            }
+        );
+
+        // \partial s_i/\partial \alpha_task = \partial s_i/\partial a_task * f_i
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient((2 * tt) * d->_optimizer->n_samp() + start),
+            [](double vp, double s){
+                return vp * s;
+            }
+        );
+        // Calculate the gradients of the individual feature parameters
+        // First calculate the contribution from \partial s / \partial p
+        for(int pp = 0; pp < d->_feat->n_params() / 2; ++pp)
+        {
+            // \partial s_i / \partial(\alpha_f or a_f) = \partial s_i/\partial a_task * \alpha_task
+            std::transform(
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+                d->_optimizer->feature_gradient(2 * (d->_optimizer->task_sizes().size() + pp) * d->_optimizer->n_samp() + start),
+                [p, tt](double s){return p[2 * tt] * s;}
+            );
+            std::copy_n(
+                d->_optimizer->feature_gradient(2 * (pp + d->_optimizer->task_sizes().size()) * d->_optimizer->n_samp() + start),
+                d->_optimizer->task_sizes()[tt],
+                d->_optimizer->feature_gradient((2 * (pp + d->_optimizer->task_sizes().size()) + 1) * d->_optimizer->n_samp() + start)
+            );
+        }
+        start += d->_optimizer->task_sizes()[tt];
+    }
+    // Add the component from the feature gradient
+    d->_feat->gradient(
+        d->_optimizer->feature_gradient(d->_optimizer->n_samp() * d->_optimizer->task_sizes().size() * 2),
+        d->_optimizer->work(),
+        p + 2 * d->_optimizer->task_sizes().size()
+    );
+
+    if(grad)
+    {
+        // Total the individual residual derivatives
+        for(int pp = 0; pp < 2 * d->_optimizer->task_sizes().size() + d->_feat->n_params(); ++pp)
+            grad[pp] = 1.0 / d->_optimizer->n_samp() * std::accumulate(
+                d->_optimizer->feature_gradient(pp * d->_optimizer->n_samp()),
+                d->_optimizer->feature_gradient((pp + 1) * d->_optimizer->n_samp()),
+                0.0
+            );
+    }
+    return std::accumulate(d->_optimizer->residuals(0), d->_optimizer->residuals(d->_optimizer->n_samp()), 0.0);
+}
+
+double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double* grad, void* data)
+{
+    feat_data* d = (feat_data*) data;
+    double* val_ptr = d->_feat->value_ptr(p + 2 * d->_optimizer->task_sizes().size());
+
+    std::fill_n(d->_optimizer->feature_gradient(0), n * d->_optimizer->n_samp(), 0.0);
+    int start = 0;
+    for(int tt = 0; tt < d->_optimizer->task_sizes().size(); ++tt)
+    {
+        // Calculate the residual
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->residuals(start),
+            [p, tt, d](double vp, double prop){
+                return d->_optimizer->cauchy_scaling() * std::log(1 + std::pow(prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]), 2.0) / d->_optimizer->cauchy_scaling()) / d->_optimizer->n_samp();
+            }
+        );
+
+        // Calculate the base of the gradient for each step
+
+        // Contribution to the derivative from (p - (\alpha_task * log(feat_val) + a_task) )^2
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_prop + start,
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [p, tt](double vp, double prop){
+                return prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]);
+            }
+        );
+
+        // Contribution from log(1 + s/a^2)
+        std::transform(
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+            d->_optimizer->feature_gradient((2 * tt + 1)*d->_optimizer->n_samp() + start),
+            [d](double s){
+                return -2.0 / (1.0 + std::pow(s, 2.0) / d->_optimizer->cauchy_scaling()) * s;
+            }
+        );
+
+        // \partial s_i/\partial \alpha_task = \partial s_i/\partial a_task * log(f_i)
+        std::transform(
+            val_ptr + start,
+            val_ptr + d->_optimizer->task_sizes()[tt] + start,
+            d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+            d->_optimizer->feature_gradient(2 * tt * d->_optimizer->n_samp() + start),
+            [](double vp, double s){
+                return std::log(vp) * s;
+            }
+        );
+
+        for(int pp = 0; pp < d->_feat->n_params() / 2; ++pp)
+        {
+            // \partial s_i / \partial(\alpha_f or a_f) = \partial s_i/\partial a_task * \alpha_task / feat_i
+            std::transform(
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start),
+                d->_optimizer->feature_gradient((2 * tt + 1) * d->_optimizer->n_samp() + start + d->_optimizer->task_sizes()[tt]),
+                val_ptr + start,
+                d->_optimizer->feature_gradient(2 * (pp + d->_optimizer->task_sizes().size()) * d->_optimizer->n_samp() + start),
+                [p, tt](double s, double vp){return p[2 * tt] * s / vp;}
+            );
+            std::copy_n(
+                d->_optimizer->feature_gradient(2 * (pp + d->_optimizer->task_sizes().size()) * d->_optimizer->n_samp() + start),
+                d->_optimizer->task_sizes()[tt],
+                d->_optimizer->feature_gradient((2 * (pp + d->_optimizer->task_sizes().size()) + 1) * d->_optimizer->n_samp() + start)
+            );
+        }
+        start += d->_optimizer->task_sizes()[tt];
+    }
+
+    // Add the component from the feature gradient
+    d->_feat->gradient(
+        d->_optimizer->feature_gradient(d->_optimizer->n_samp() * d->_optimizer->task_sizes().size() * 2),
+        d->_optimizer->work(),
+        p + 2 * d->_optimizer->task_sizes().size()
+    );
+
+    if(grad)
+    {
+        for(int pp = 0; pp < 2 * d->_optimizer->task_sizes().size() + d->_feat->n_params(); ++pp)
+            grad[pp] = 1.0 / d->_optimizer->n_samp() * std::accumulate(
+                d->_optimizer->feature_gradient(pp * d->_optimizer->n_samp()),
+                d->_optimizer->feature_gradient((pp + 1) * d->_optimizer->n_samp()),
+                0.0
+            );
+    }
+
+    return std::accumulate(d->_optimizer->residuals(0), d->_optimizer->residuals(d->_optimizer->n_samp()), 0.0);
+}
+
+std::shared_ptr<NLOptimizer> nlopt_wrapper::get_optimizer(std::string project_type, const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth, double cauchy_scaling, bool reset_max_param_depth)
+{
+    if(project_type.compare("classification") == 0)
+        return std::make_shared<NLOptimizerClassification>(task_sizes, prop, n_rung, max_param_depth, reset_max_param_depth);
+    else if(project_type.compare("regression") == 0)
+        return std::make_shared<NLOptimizerRegression>(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, reset_max_param_depth);
+    else if(project_type.compare("log_regression") == 0)
+        return std::make_shared<NLOptimizerLogRegression>(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, reset_max_param_depth);
+    else
+        throw std::logic_error("Invalid project type (" + project_type + ") was passed to get_optimizer.");
+
+    return nullptr;
+}
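
Taken together, the refactor makes parameter optimization self-contained: construct an optimizer for the projection type, then hand it any parameterized feature. A hedged sketch of the intended usage (feat is assumed to be a node_ptr to a parameterized feature):

    std::vector<int> task_sizes = {45, 45};  // two tasks of 45 samples each
    std::vector<double> prop(90, 0.0);       // property vector, one entry per sample
    std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(
        "regression", task_sizes, prop, /*n_rung=*/2, /*max_param_depth=*/-1, /*cauchy_scaling=*/0.5
    );
    // Returns the minimized loss, or infinity if the optimization failed.
    double min_loss = optimizer->optimize_feature_params(feat.get());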
diff --git a/src/nl_opt/NLOptWrapper.hpp b/src/nl_opt/NLOptWrapper.hpp
index 300c96162b70f821e8726c3c7a87f2c451835de8..75b2cf3593c1da815cf39c2f52aa806d1994fba3 100644
--- a/src/nl_opt/NLOptWrapper.hpp
+++ b/src/nl_opt/NLOptWrapper.hpp
@@ -11,288 +11,462 @@
 #include <feature_creation/node/Node.hpp>
 #include <classification/ConvexHull1D.hpp>
 
-namespace nlopt_wrapper
+class NLOptimizer
 {
-    extern double (*_objective)(unsigned int n, const double* p, double* grad, void* data); //!< Objective function to use
+protected:
+    nlopt::func _objective; //!< Objective function to use
+
+    std::vector<double> _a; //!< vector to store the A matrix for dgels
+    std::vector<double> _prop; //!< The property to fit the functions against
+    std::vector<double> _prop_copy; //!< Copy of the property to keep for dgels
+    std::vector<double> _work; //!< work array for dgels
+    std::vector<double> _zeros; //!< array of zeros to fill parameters
+    std::vector<int> _task_sizes; //!< number of samples in each task
+
+    int _n_samp; //!< total number of samples
+    int _n_rung; //!< Maximum rung of the features
+    int _max_params; //!< Maximum number of possible parameters
+    int _max_param_depth; //!< maximum depth of the binary expression trees to parameterize from the root
+
+    nlopt::algorithm _local_opt_alg; //!< Algorithm used for local optimization
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     */
+    NLOptimizer(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, nlopt::func objective, int max_param_depth=-1, bool reset_max_param_depth=false);
 
-    extern std::vector<double> _feature_gradient; //!< vector used to calculate the contribution of feature derivatives to the gradient
-    extern std::vector<double> _zeros; //!< array of zeros to fill parameters
-    extern std::vector<double> _residuals; //!< storage space for the residuals
-    extern std::vector<double> _work; //!< work array for dgels
-    extern std::vector<double> _a_copy; //!< Copy of the initial a vector for least squares problems
-    extern std::vector<double> _prop_copy; //!< Copy of the property vector
-    extern std::vector<int> _task_sizes; //!< tasks sizes
-    extern double _cauchy_scaling; //!< Scaling factor for calculating the cauchy loss function
-    extern int _n_samp; //!< total number of samples
-    extern nlopt::algorithm _local_opt_alg; //!< Algorithm used for local optimization
-    extern int _max_param_depth; //!< parameterize features to all depths of the tree
+    // DocString: nloptimizer_optimize_feature_params
+    /**
+     * @brief uses nlopt to optimize the parameters of a feature
+     *
+     * @param feat Pointer to the feature whose parameters will be optimized
+     * @param use_simplex If true use a Nelder-Mead type optimizer (LN_SBPLX) for local optimization
+     *                    Otherwise use a gradient descent based approach if possible
+     */
+    double optimize_feature_params(Node* feat, bool use_simplex=false);
+
+    inline std::vector<int>& task_sizes(){return _task_sizes;}
+    inline std::vector<double>& prop(){return _prop;}
+    inline double* work(){return _work.data();}
+    inline int n_samp(){return _n_samp;}
+    inline int n_rung(){return _n_rung;}
+    inline int max_params(){return _max_params;}
+    inline int max_param_depth(){return _max_param_depth;}
+    inline nlopt::algorithm local_opt_alg(){return _local_opt_alg;}
+
+    virtual std::shared_ptr<ConvexHull1D> convex_hull() = 0;
+    virtual double* feature_gradient(int ind) = 0;
+    virtual double* residuals(int ind) = 0;
+    virtual double cauchy_scaling() = 0;
+};
+
+class NLOptimizerClassification: public NLOptimizer
+{
+protected:
+    std::shared_ptr<ConvexHull1D> _convex_hull; //!< Object to perform classification
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     */
+    NLOptimizerClassification(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, bool reset_max_param_depth=false);
 
-    extern ConvexHull1D _convex_hull; //!< Object to perform classification
+    inline std::shared_ptr<ConvexHull1D> convex_hull(){return _convex_hull;}
+    inline double* feature_gradient(int ind){return nullptr;} //!< Feature gradients are not used for classification
+    inline double* residuals(int ind){return nullptr;} //!< Residuals are not used for classification
+    inline double cauchy_scaling(){return 0.0;} //!< The Cauchy loss is not used for classification
+};
 
-    #pragma omp threadprivate(_work, _a_copy, _residuals, _feature_gradient, _prop_copy, _convex_hull)
+class NLOptimizerRegression: public NLOptimizer
+{
+protected:
+    std::vector<double> _feature_gradient; //!< vector used to calculate the contribution of feature derivatives to the gradient
+    std::vector<double> _residuals; //!< storage space for the residuals
+    double _cauchy_scaling; //!< Scaling factor for calculating the cauchy loss function
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param cauchy_scaling scaling factor used for the Cauchy loss function
+     */
+    NLOptimizerRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5, bool log_reg=false, bool reset_max_param_depth=false);
 
-    typedef struct
-    {
-        double* _prop; //!< pointer to the property vector
-        double* _a; //!< Node pointer of the feature to parameterize
-        int _n_feat; //!< Node pointer of the feature to parameterize
-    } l0_data;
+    inline std::shared_ptr<ConvexHull1D> convex_hull(){return nullptr;} //!< The convex hull is not used for regression
+    inline double* feature_gradient(int ind){return &_feature_gradient[ind];}
+    inline double* residuals(int ind){return &_residuals[ind];}
+    inline double cauchy_scaling(){return _cauchy_scaling;}
+};
+
+class NLOptimizerLogRegression: public NLOptimizerRegression
+{
+public:
+    /**
+     * @brief Constructor
+     *
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param cauchy_scaling scaling factor used for the Cauchy loss function
+     */
+    NLOptimizerLogRegression(const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5, bool reset_max_param_depth=false);
+};
+
+namespace nlopt_wrapper
+{
+    extern int MAX_PARAM_DEPTH; //!< Maximum parameterization depth shared by all optimizers
 
     typedef struct
     {
         double* _prop; //!< pointer to the property vector
         Node* _feat; //!< Node pointer of the feature to parameterize
+        NLOptimizer* _optimizer; //!< Data structure to store information for the optimization
     } feat_data;
 
 
-    static double objective_class(unsigned int n, const double* p, double* grad, void* data)
-    {
-        feat_data* d = (feat_data*) data;
-        return _convex_hull.overlap_1d(d->_feat->value_ptr(p));
-    }
+    double objective_class(unsigned int n, const double* p, double* grad, void* data); //!< NLopt objective for classification (1D convex-hull overlap)
 
-    static double objective_reg(unsigned int n, const double* p, double* grad, void* data)
-    {
-        feat_data* d = (feat_data*) data;
-        double* val_ptr = d->_feat->value_ptr(p + 2 * _task_sizes.size());
+    double objective_reg(unsigned int n, const double* p, double* grad, void* data); //!< NLopt objective for regression (Cauchy loss of a per-task linear fit)
 
-        int start = 0;
-        std::fill_n(_feature_gradient.data(), _feature_gradient.size(), 0.0);
-        for(int tt = 0; tt < _task_sizes.size(); ++tt)
-        {
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_residuals[start],
-                [p, tt](double vp, double prop){
-                    return _cauchy_scaling * std::log(1 + std::pow(prop - (vp * p[2*tt] + p[2*tt + 1]), 2.0) / _cauchy_scaling) / _n_samp;
-                }
-            );
-
-            // Calculate the base of the gradient for each step
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [p, tt](double vp, double prop){
-                    return prop - (vp * p[2*tt] + p[2*tt + 1]);
-                }
-            );
+    double objective_log_reg(unsigned int n, const double* p, double* grad, void* data); //!< NLopt objective for log regression (Cauchy loss of a per-task log-linear fit)
 
-            std::transform(
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start +_task_sizes[tt]],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [](double s){
-                    return -2.0 / (1.0 + std::pow(s, 2.0) / _cauchy_scaling) * s;
-                }
-            );
+    /**
+     * @brief Get an optimizer for the desired task
+     *
+     * @param project_type The type of projection operator to optimize the features for
+     * @param task_sizes number of samples in each task
+     * @param prop The property to fit the functions against
+     * @param n_rung Maximum rung of the features
+     * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+     * @param cauchy_scaling scaling factor used for the Cauchy loss function
+     *
+     * @return The correct optimizer
+     */
+    std::shared_ptr<NLOptimizer> get_optimizer(std::string project_type, const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5, bool reset_max_param_depth=false);
 
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                &_feature_gradient[(2 * tt) * _n_samp + start],
-                [](double vp, double s){
-                    return vp * s;
-                }
+    #ifdef PY_BINDINGS
+        // DocString: nlopt_wrapper_get_reg_optimizer_list_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
             );
-            for(int pp = 0; pp < d->_feat->n_params() / 2; ++pp)
-            {
-                std::transform(
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start + _task_sizes[tt]],
-                    &_feature_gradient[2 * (_task_sizes.size() + pp) * _n_samp + start],
-                    [p, tt](double s){return p[2 * tt] * s;}
-                );
-                std::copy_n(
-                    &_feature_gradient[2 * (pp + _task_sizes.size()) * _n_samp + start],
-                    _task_sizes[tt],
-                    &_feature_gradient[(2 * (pp + _task_sizes.size()) + 1) * _n_samp + start]
-                );
-            }
-            start += _task_sizes[tt];
         }
-        d->_feat->gradient(&_feature_gradient.data()[_n_samp * _task_sizes.size() * 2], _work.data(), p + 2 * _task_sizes.size());
 
-        if(grad)
+        // DocString: nlopt_wrapper_get_reg_optimizer_list_arr
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
         {
-            for(int pp = 0; pp < 2 * _task_sizes.size() + d->_feat->n_params(); ++ pp)
-                grad[pp] = std::accumulate(&_feature_gradient[pp * _n_samp], &_feature_gradient[(pp + 1) * _n_samp], 0.0) / _n_samp;
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
+            );
         }
 
-        return std::accumulate(_residuals.begin(), _residuals.end(), 0.0);
-    }
-
-    static double objective_log_reg(unsigned int n, const double* p, double* grad, void* data)
-    {
-        feat_data* d = (feat_data*) data;
-        double* val_ptr = d->_feat->value_ptr(p + 2 * _task_sizes.size());
-
-        d->_feat->gradient(_feature_gradient.data(), _work.data());
-        int start = 0;
-        for(int tt = 0; tt < _task_sizes.size(); ++tt)
+        // DocString: nlopt_wrapper_get_reg_optimizer_arr_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
         {
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_residuals[start],
-                [p, tt](double vp, double prop){
-                    return _cauchy_scaling * std::log(1 + std::pow(prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]), 2.0) / _cauchy_scaling);
-                }
-            );
-
-            // Calculate the base of the gradient for each step
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                d->_prop + start,
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [p, tt](double vp, double prop){
-                    return prop - std::log(vp * p[2*tt] + p[2*tt + 1]);
-                }
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
             );
+        }
 
-            std::transform(
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start +_task_sizes[tt]],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [](double s){
-                    return -2.0 / (1.0 + std::pow(s / _cauchy_scaling, 2.0)) * s;
-                }
+        // DocString: nlopt_wrapper_get_reg_optimizer_arr_arr
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression tress to parameterize from the root
+         * @param scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerRegression get_reg_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                false,
+                true
             );
+        }
 
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                &_feature_gradient[(2 * tt + 1)*_n_samp + start],
-                [p, tt](double vp, double s){
-                    return s / (p[2 * tt] * vp + p[2 * tt + 1]);
-                }
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_list_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerLogRegression get_log_reg_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
             );
+        }
 
-            std::transform(
-                val_ptr + start,
-                val_ptr + _task_sizes[tt] + start,
-                &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                &_feature_gradient[2 * tt * _n_samp + start],
-                [](double vp, double s){
-                    return vp * s;
-                }
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_list_arr
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerLogRegression get_log_reg_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
             );
-
-            for(int pp = 0; pp < d->_feat->n_params(); ++pp)
-            {
-                std::transform(
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start],
-                    &_feature_gradient[(2 * tt + 1) * _n_samp + start + _task_sizes[tt]],
-                    &_feature_gradient[2 * (pp + _task_sizes.size()) * _n_samp + start],
-                    [p, tt](double s){return p[2 * tt] * s;}
-                );
-                std::copy_n(
-                    &_feature_gradient[2 * (pp + _task_sizes.size()) * _n_samp + start],
-                    _task_sizes[tt],
-                    &_feature_gradient[(2 * (pp + _task_sizes.size()) + 1) * _n_samp + start]
-                );
-            }
-            start += _task_sizes[tt];
         }
-        return std::accumulate(_residuals.begin(), _residuals.end(), 0.0);
-    }
-
-    /**
-     * @brief uses nlopt to optimize the parameters of a feature
-     *
-     * @param data data structure that passes the feature and prop to nlopt
-     * @param use_simplex If true use a Nelder-Mead type optimizer (LN_SBPLEX) for local optimization
-     *                    Otherwise use a gradient decent based approach if possible
-     */
-    double optimize_feature_params(feat_data data, bool use_simplex=false);
-
-    /**
-     * @brief Set up the projection operator for the objective function
-     *
-     * @param task_sizes number of samples per task
-     * @param max_dim Maximum dimension of the features
-     * @param n_rung maximum rung of a feature
-     */
-    void setup_data(std::vector<int> task_sizes, int max_dim, int n_rung, int max_param_depth=-1);
-
-    /**
-     * @brief Set up the projection operator for the objective function
-     *
-     * @param calc_type string key for the type of the calculation to run
-     * @param prop pointer to the property
-     * @param sizes number of samples per task
-     * @param n_rung maximum rung of a feature
-     */
-    void set_objective(std::string calc_type, double* prop, const std::vector<int> sizes, int n_rung, int max_param_depth=100);
 
-    #ifdef PY_BINDINGS
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_arr_list
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
          *
-         * @param task_sizes number of samples per task
-         * @param max_dim Maximum dimension of the features
-         * @param n_rung maximum rung of a feature
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
+         *
+         * @return The correct optimizer
          */
-        inline void setup_data(py::list task_sizes, int max_dim, int n_rung, int max_param_depth=-1){setup_data(python_conv_utils::from_list<int>(task_sizes), max_dim, n_rung, max_param_depth);}
+        inline NLOptimizerLogRegression get_log_reg_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
+            );
+        }
 
+        // DocString: nlopt_wrapper_get_log_reg_optimizer_arr_arr
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         * @param cauchy_scaling scaling factor used for the Cauchy loss function
          *
-         * @param task_sizes number of samples per task
-         * @param max_dim Maximum dimension of the features
-         * @param n_rung maximum rung of a feature
+         * @return The correct optimizer
          */
-        inline void setup_data(np::ndarray task_sizes, int max_dim, int n_rung, int max_param_depth=-1){setup_data(python_conv_utils::from_ndarray<int>(task_sizes), max_dim, n_rung, max_param_depth);}
+        inline NLOptimizerLogRegression get_log_reg_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1, double cauchy_scaling=0.5)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
+            return NLOptimizerLogRegression(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                cauchy_scaling,
+                true
+            );
+        }
 
+        // DocString: nlopt_wrapper_get_class_optimizer_list_list
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
          *
-         * @param calc_type string key for the type of the calculation to run
-         * @param prop list to the property
-         * @param N number of samples per task
-         * @param n_rung maximum rung of a feature
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         *
+         * @return The correct optimizer
          */
-        inline void set_objective(std::string calc_type, py::list prop, py::list sizes, int n_rung, int max_param_depth=100)
+        inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
         {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
             std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
-            return set_objective(calc_type, prop_vec.data(), python_conv_utils::from_list<int>(sizes), n_rung, max_param_depth);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
         }
 
+        // DocString: nlopt_wrapper_get_class_optimizer_list_arr
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
          *
-         * @param calc_type string key for the type of the calculation to run
-         * @param prop list to the property
-         * @param N number of samples per task
-         * @param n_rung maximum rung of a feature
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         *
+         * @return The correct optimizer
          */
-        inline void set_objective(std::string calc_type, np::ndarray prop, py::list sizes, int n_rung, int max_param_depth=100)
+        inline NLOptimizerClassification get_class_optimizer(py::list task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
         {
+            std::vector<int> ts_vec = python_conv_utils::from_list<int>(task_sizes);
             std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-            return set_objective(calc_type, prop_vec.data(), python_conv_utils::from_list<int>(sizes), n_rung, max_param_depth);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
+        }
+
+        // DocString: nlopt_wrapper_get_class_optimizer_arr_list
+        /**
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
+         *
+         * @return The correct optimizer
+         */
+        inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, py::list prop, int n_rung, int max_param_depth=-1)
+        {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
+            std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
         }
 
+        // DocString: nlopt_wrapper_get_class_optimizer_arr_arr
         /**
-         * @brief Set up the projection operator for the objective function
+         * @brief Get an optimizer for the desired task
+         *
+         * @param task_sizes number of samples in each task
+         * @param prop The property to fit the functions against
+         * @param n_rung Maximum rung of the features
+         * @param max_param_depth maximum depth of the binary expression trees to parameterize from the root
          *
-         * @param calc_type string key for the type of the calculation to run
-         * @param prop list to the property
-         * @param N number of samples per task
-         * @param n_rung maximum rung of a feature
+         * @return The correct optimizer
          */
-        inline void set_objective(std::string calc_type, np::ndarray prop, np::ndarray sizes, int n_rung, int max_param_depth=100)
+        inline NLOptimizerClassification get_class_optimizer(np::ndarray task_sizes, np::ndarray prop, int n_rung, int max_param_depth=-1)
         {
+            std::vector<int> ts_vec = python_conv_utils::from_ndarray<int>(task_sizes);
             std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
-            return set_objective(calc_type, prop_vec.data(), python_conv_utils::from_ndarray<int>(sizes), n_rung, max_param_depth);
+            return NLOptimizerClassification(
+                ts_vec,
+                prop_vec,
+                n_rung,
+                max_param_depth,
+                true
+            );
         }
     #endif
+
 }
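
Because all scratch buffers now live on the optimizer instance rather than in threadprivate globals, thread safety reduces to object ownership. A sketch of the presumed pattern, with each OpenMP thread constructing its own optimizer (illustrative only; task_sizes, prop, n_rung, and feats are assumed):

    #pragma omp parallel
    {
        // Per-thread optimizer: no shared mutable state between threads.
        std::shared_ptr<NLOptimizer> opt = nlopt_wrapper::get_optimizer(
            "regression", task_sizes, prop, n_rung
        );
        #pragma omp for schedule(dynamic)
        for(int ff = 0; ff < static_cast<int>(feats.size()); ++ff)
            opt->optimize_feature_params(feats[ff].get());
    }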
 
 
diff --git a/src/python/__init__.py b/src/python/__init__.py
index ec156aaf51bebb86f041813580fdb7337f38808f..48924cae16d3dcc337df25b9c60c9bb18cd35094 100644
--- a/src/python/__init__.py
+++ b/src/python/__init__.py
@@ -316,7 +316,7 @@ def generate_fs(
         phi_0,
         allowed_ops,
         allowed_param_ops,
-        list(prop),
+        prop,
         task_sizes_train,
         calc_type,
         max_phi,
diff --git a/src/python/bindings_docstring_keyed.cpp b/src/python/bindings_docstring_keyed.cpp
index c92a21ce6a7249bf658f3a93328e1db31739adf2..a110f252b360435f8aecf362e01ddf0126ffec1f 100644
--- a/src/python/bindings_docstring_keyed.cpp
+++ b/src/python/bindings_docstring_keyed.cpp
@@ -58,18 +58,43 @@ void sisso::register_all()
     def("initialize_values_arr", &node_value_arrs::initialize_values_arr);
     def("initialize_d_matrix_arr", &node_value_arrs::initialize_d_matrix_arr);
 
-    void (*set_objective_list_list)(std::string, py::list, py::list, int, int) = &nlopt_wrapper::set_objective;
-    void (*set_objective_arr_list)(std::string, np::ndarray, py::list, int, int) = &nlopt_wrapper::set_objective;
-    void (*set_objective_arr_arr)(std::string, np::ndarray, np::ndarray, int, int) = &nlopt_wrapper::set_objective;
 
-    def("set_objective", set_objective_list_list);
-    def("set_objective", set_objective_arr_list);
-    def("set_objective", set_objective_arr_arr);
+    #ifdef PARAMETERIZE
+        sisso::feature_creation::nloptimizer::registerNLOptimizer();
+        sisso::feature_creation::nloptimizer::registerNLOptimizerClassification();
+        sisso::feature_creation::nloptimizer::registerNLOptimizerRegression();
+        sisso::feature_creation::nloptimizer::registerNLOptimizerLogRegression();
 
-    void(*setup_data_list)(py::list, int, int, int) = &nlopt_wrapper::setup_data;
-    void(*setup_data_arr)(np::ndarray, int, int, int) = &nlopt_wrapper::setup_data;
-    def("setup_data", setup_data_list);
-    def("setup_data", setup_data_arr);
+        NLOptimizerRegression(*get_reg_optimizer_list_list)(py::list, py::list, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+        NLOptimizerRegression(*get_reg_optimizer_list_arr)(py::list, np::ndarray, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+        NLOptimizerRegression(*get_reg_optimizer_arr_list)(np::ndarray, py::list, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+        NLOptimizerRegression(*get_reg_optimizer_arr_arr)(np::ndarray, np::ndarray, int, int, double) = &nlopt_wrapper::get_reg_optimizer;
+
+        NLOptimizerLogRegression(*get_log_reg_optimizer_list_list)(py::list, py::list, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+        NLOptimizerLogRegression(*get_log_reg_optimizer_list_arr)(py::list, np::ndarray, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+        NLOptimizerLogRegression(*get_log_reg_optimizer_arr_list)(np::ndarray, py::list, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+        NLOptimizerLogRegression(*get_log_reg_optimizer_arr_arr)(np::ndarray, np::ndarray, int, int, double) = &nlopt_wrapper::get_log_reg_optimizer;
+
+        NLOptimizerClassification(*get_class_optimizer_list_list)(py::list, py::list, int, int) = &nlopt_wrapper::get_class_optimizer;
+        NLOptimizerClassification(*get_class_optimizer_list_arr)(py::list, np::ndarray, int, int) = &nlopt_wrapper::get_class_optimizer;
+        NLOptimizerClassification(*get_class_optimizer_arr_list)(np::ndarray, py::list, int, int) = &nlopt_wrapper::get_class_optimizer;
+        NLOptimizerClassification(*get_class_optimizer_arr_arr)(np::ndarray, np::ndarray, int, int) = &nlopt_wrapper::get_class_optimizer;
+
+        def("get_reg_optimizer", get_reg_optimizer_list_list, "@DocString_nlopt_wrapper_get_reg_optimizer_list_list");
+        def("get_reg_optimizer", get_reg_optimizer_list_arr, "@DocString_nlopt_wrapper_get_reg_optimizer_list_arr");
+        def("get_reg_optimizer", get_reg_optimizer_arr_list, "@DocString_nlopt_wrapper_get_reg_optimizer_arr_list");
+        def("get_reg_optimizer", get_reg_optimizer_arr_arr, "@DocString_nlopt_wrapper_get_reg_optimizer_arr_arr");
+
+        def("get_log_reg_optimizer", get_log_reg_optimizer_list_list, "@DocString_nlopt_wrapper_get_log_reg_optimizer_list_list");
+        def("get_log_reg_optimizer", get_log_reg_optimizer_list_arr, "@DocString_nlopt_wrapper_get_log_reg_optimizer_list_arr");
+        def("get_log_reg_optimizer", get_log_reg_optimizer_arr_list, "@DocString_nlopt_wrapper_get_log_reg_optimizer_arr_list");
+        def("get_log_reg_optimizer", get_log_reg_optimizer_arr_arr, "@DocString_nlopt_wrapper_get_log_reg_optimizer_arr_arr");
+
+        def("get_class_optimizer", get_class_optimizer_list_list, "@DocString_nlopt_wrapper_get_class_optimizer_list_list");
+        def("get_class_optimizer", get_class_optimizer_list_arr, "@DocString_nlopt_wrapper_get_class_optimizer_list_arr");
+        def("get_class_optimizer", get_class_optimizer_arr_list, "@DocString_nlopt_wrapper_get_class_optimizer_arr_list");
+        def("get_class_optimizer", get_class_optimizer_arr_arr, "@DocString_nlopt_wrapper_get_class_optimizer_arr_arr");
+    #endif
 }
 
 void sisso::feature_creation::registerFeatureSpace()
@@ -126,6 +151,25 @@ void sisso::feature_creation::registerUnit()
 }
 
 #ifdef PARAMETERIZE
+    void sisso::feature_creation::nloptimizer::registerNLOptimizer()
+    {
+        class_<sisso::feature_creation::nloptimizer::NLOptimizerWrap, boost::noncopyable>("NLOptimizer", no_init)
+            .def("optimize_feature_params", &NLOptimizer::optimize_feature_params, "@DocString_nloptimizer_optimize_feature_params@")
+        ;
+    }
+    void sisso::feature_creation::nloptimizer::registerNLOptimizerClassification()
+    {
+        class_<NLOptimizerClassification, bases<NLOptimizer>>("NLOptimizerClassification", no_init);
+    }
+    void sisso::feature_creation::nloptimizer::registerNLOptimizerRegression()
+    {
+        class_<NLOptimizerRegression, bases<NLOptimizer>>("NLOptimizerRegression", no_init);
+    }
+    void sisso::feature_creation::nloptimizer::registerNLOptimizerLogRegression()
+    {
+        class_<NLOptimizerLogRegression, bases<NLOptimizer>>("NLOptimizerLogRegression", no_init);
+    }
+
     void sisso::feature_creation::node::registerNode()
     {
         void (Node::*reindex_1)(int) = &Node::reindex;
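
The set_objective and setup_data overload tables above are replaced wholesale by per-project-type factories, each registered four times so Python callers can pass any mix of py::list and np::ndarray; this is also why src/python/__init__.py can now forward prop without the list() conversion. A hedged sketch of one overload body, assuming the python_conv_utils helpers the removed set_objective binding used and constructor arguments mirroring the NLOptimizerClassification call shown earlier (the trailing double is presumed to be a Cauchy scaling factor, matching the cauchy_scaling accessor on the wrapper below):

    // Sketch, not the verbatim implementation: every overload converts its
    // Python inputs and forwards them to the same C++ constructor.
    NLOptimizerRegression nlopt_wrapper::get_reg_optimizer(
        np::ndarray task_sizes, np::ndarray prop, int n_rung,
        int max_param_depth, double cauchy_scaling)
    {
        return NLOptimizerRegression(
            python_conv_utils::from_ndarray<int>(task_sizes),
            python_conv_utils::from_ndarray<double>(prop),
            n_rung,
            max_param_depth,
            cauchy_scaling
        );
    }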
diff --git a/src/python/bindings_docstring_keyed.hpp b/src/python/bindings_docstring_keyed.hpp
index 2eebb78d2d59e43c5955cbc03442894ba756be99..8183ab83bef8b0c903ea6d67292a90e5fad7f56c 100644
--- a/src/python/bindings_docstring_keyed.hpp
+++ b/src/python/bindings_docstring_keyed.hpp
@@ -31,6 +31,22 @@ namespace sisso
         static void registerFeatureSpace();
         static void registerDomain();
         static void registerUnit();
+        namespace nloptimizer
+        {
+            struct NLOptimizerWrap : NLOptimizer, py::wrapper<NLOptimizer>
+            {
+            public:
+                inline std::shared_ptr<ConvexHull1D> convex_hull(){return this->get_override("convex_hull")();}
+                inline double* feature_gradient(int ind){return this->get_override("feature_gradient")(ind);}
+                inline double* residuals(int ind){return this->get_override("residuals")(ind);}
+                inline double cauchy_scaling(){return this->get_override("cauchy_scaling")();}
+            };
+            static void registerNLOptimizer();
+            static void registerNLOptimizerClassification();
+            static void registerNLOptimizerRegression();
+            static void registerNLOptimizerLogRegression();
+
+        }
         namespace node
         {
             /**
@@ -95,7 +111,7 @@ namespace sisso
                 inline std::string get_latex_expr(){return this->get_override("latex_expr")();}
                 inline void update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot){this->get_override("update_add_sub_leaves")();}
                 inline void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot){this->get_override("update_div_mult_leaves")();}
-                inline void get_parameters(std::vector<double>& prop){this->get_override("get_parameters")();}
+                inline void get_parameters(std::shared_ptr<NLOptimizer> optimizer){this->get_override("get_parameters")(optimizer);}
                 inline void set_parameters(std::vector<double>, bool check_sz=true){this->get_override("set_parameters")();}
                 inline std::vector<double> parameters(){return this->get_override("parameters")();}
                 inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1){this->get_override("set_bounds")();}
@@ -144,8 +160,6 @@ namespace sisso
                 template<int N>
                 static void registerOperatorNode()
                 {
-                    void (OperatorNode<N>::*get_parameters_list)(py::list) = &OperatorNode<N>::get_parameters;
-                    void (OperatorNode<N>::*get_parameters_arr)(np::ndarray) = &OperatorNode<N>::get_parameters;
                     void (OperatorNode<N>::*set_params_list)(py::list) = &OperatorNode<N>::set_parameters;
                     void (OperatorNode<N>::*set_params_arr)(np::ndarray) = &OperatorNode<N>::set_parameters;
                     py::class_<OperatorNodeWrap<N>, py::bases<Node>, boost::noncopyable>("OperatorNode")
@@ -153,8 +167,7 @@ namespace sisso
                         .def("is_const", &OperatorNode<N>::is_const, "@DocString_op_node_is_const@")
                         .def("rung", py::pure_virtual(&OperatorNode<N>::rung), "@DocString_op_node_rung@")
                         .def("unit", py::pure_virtual(&OperatorNode<N>::unit), "@DocString_op_node_unit@")
-                        .def("get_parameters", get_parameters_arr, "@DocString_op_node_param_arr@")
-                        .def("get_parameters", get_parameters_list, "@DocString_op_node_param_list@")
+                        .def("get_parameters", py::pure_virtual(&OperatorNode<N>::get_parameters), "@DocString_op_node_get_params@")
                         .def("set_parameters", set_params_arr, "@DocString_op_node_set_param_arr@")
                         .def("set_parameters", set_params_list, "@DocString_op_node_set_param_list@")
                         .add_property("n_feats", &OperatorNode<N>::n_feats, "@DocString_op_node_n_feats@")
diff --git a/src/python/feature_creation/FeatureSpace.cpp b/src/python/feature_creation/FeatureSpace.cpp
index d9172f913f9f58656c7ef268eb6371ec7b8d6819..46f1bcecf8fe8be8f9ea7ebcbd4ba4ca0c98a0f6 100644
--- a/src/python/feature_creation/FeatureSpace.cpp
+++ b/src/python/feature_creation/FeatureSpace.cpp
@@ -26,6 +26,7 @@ FeatureSpace::FeatureSpace(
     _start_gen(1, 0),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
+    _project_type(project_type),
     _mpi_comm(mpi_setup::comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(min_abs_feat_val),
@@ -38,7 +39,7 @@ FeatureSpace::FeatureSpace(
     _n_samp(_phi[0]->n_samp()),
     _max_param_depth(max_param_depth)
 {
-    initialize_fs(project_type);
+    initialize_fs();
 }
 
 FeatureSpace::FeatureSpace(
@@ -67,6 +68,7 @@ FeatureSpace::FeatureSpace(
     _start_gen(1, 0),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
+    _project_type(project_type),
     _mpi_comm(mpi_setup::comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(min_abs_feat_val),
@@ -79,7 +81,7 @@ FeatureSpace::FeatureSpace(
     _n_samp(_phi[0]->n_samp()),
     _max_param_depth(max_param_depth)
 {
-    initialize_fs(project_type);
+    initialize_fs();
 }
 
 FeatureSpace::FeatureSpace(
@@ -97,6 +99,7 @@ FeatureSpace::FeatureSpace(
     _task_sizes(python_conv_utils::from_list<int>(task_sizes)),
     _feature_space_file("feature_space/selected_features.txt"),
     _feature_space_summary_file("feature_space/SIS_summary.txt"),
+    _project_type(project_type),
     _mpi_comm(mpi_setup::comm),
     _cross_cor_max(cross_corr_max),
     _l_bound(1e-50),
@@ -108,17 +111,17 @@ FeatureSpace::FeatureSpace(
     _n_samp(_phi_0[0]->n_samp()),
     _max_param_depth(-1)
 {
-    if(project_type.compare("regression") == 0)
+    if(_project_type.compare("regression") == 0)
     {
         _project = project_funcs::project_r2;
         _project_no_omp = project_funcs::project_r2_no_omp;
     }
-    else if(project_type.compare("classification") == 0)
+    else if(_project_type.compare("classification") == 0)
     {
         _project = project_funcs::project_classify;
         _project_no_omp = project_funcs::project_classify_no_omp;
     }
-    else if(project_type.compare("log_regression") == 0)
+    else if(_project_type.compare("log_regression") == 0)
     {
         if(_task_sizes.size() > 1)
             throw std::logic_error("Log Regression can not be done using multiple tasks.");
@@ -191,7 +194,7 @@ FeatureSpace::FeatureSpace(
 
     for(int rr = 1; rr < _max_phi; ++rr)
     {
-        nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, rr);
+        nlopt_wrapper::MAX_PARAM_DEPTH = rr;
         bool is_correct = true;
         for(auto& feat : _phi)
         {
@@ -322,7 +325,7 @@ FeatureSpace::FeatureSpace(
 
     for(int rr = 1; rr < _max_phi; ++rr)
     {
-        nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, rr);
+        nlopt_wrapper::MAX_PARAM_DEPTH = rr;
         bool is_correct = true;
         for(auto& feat : _phi)
         {
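
In the FeatureSpace constructors the per-rung reparameterization loop now only bumps the global depth; the optimizer itself can be rebuilt on demand from the stored _project_type. A minimal sketch of the resulting shape, combining the MAX_PARAM_DEPTH assignment in the hunks above with the get_optimizer factory used by the tests (the loop body over _phi is elided in the hunks, so the get_parameters call is inferred from the new Node interface):

    // Sketch: re-fit every feature's parameters at each depth rr.
    for (int rr = 1; rr < _max_phi; ++rr)
    {
        nlopt_wrapper::MAX_PARAM_DEPTH = rr;
        std::shared_ptr<NLOptimizer> optimizer =
            nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi);
        for (auto& feat : _phi)
            feat->get_parameters(optimizer);
    }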
diff --git a/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc b/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc
index 1b163477c933a0d21b12d03dc516facef33a7e02..08667d3622e6f4839ae927c3412edb1967bd4938 100644
--- a/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_abs_diff_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -48,7 +50,7 @@ namespace
 
             allowed_op_funcs::abs_diff(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -61,20 +63,23 @@ namespace
 
         double _a;
         double _alpha;
+
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(AbsDiffParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AbsDiffParamNode created with an absolute value above the upper bound)";
 
-        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AbsDiffParamNode created with an absolute value below the lower bound)";
 
-        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateAbsDiffParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
+
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
 
@@ -84,7 +89,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (AbsDiffParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -92,7 +97,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AbsDiffParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -100,7 +105,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AbsDiffParamNode created with only one primary feature present)";
         }
         catch(const InvalidFeatureException& e)
@@ -108,7 +113,7 @@ namespace
 
         try
         {
-            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _abs_diff_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -120,7 +125,7 @@ namespace
     TEST_F(AbsDiffParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _abs_diff_test = std::make_shared<AbsDiffParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_abs_diff_test->rung(), 1);
 
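
Every parameterization test from here on follows the same three-step migration, so only the operator under test differs from file to file: pin the global depth at the top of SetUp, replace the set_objective call with a stored optimizer, and pass _optimizer wherever _prop used to go. The shared skeleton:

    // Common fixture pattern across tests/googletest/.../parameterization.
    void SetUp() override
    {
        nlopt_wrapper::MAX_PARAM_DEPTH = 1;  // was the last set_objective argument
        // ... unchanged fixture data setup ...
        _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
    }
    std::shared_ptr<NLOptimizer> _optimizer;  // new fixture member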
diff --git a/tests/googletest/feature_creation/parameterization/test_abs_node.cc b/tests/googletest/feature_creation/parameterization/test_abs_node.cc
index 6e493cbf6bb35252fa199b2a936769329ffc9f6f..8e472db1e13abd0e603098991df3bddf2a182e81 100644
--- a/tests/googletest/feature_creation/parameterization/test_abs_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_abs_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 1);
 
             _task_sizes_train = {900};
@@ -38,7 +40,8 @@ namespace
 
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::abs(900, _phi[0]->value_ptr(), _alpha, _a, _prop.data());
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -50,19 +53,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(AbsParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _prop);
+        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 1) << " (AbsParamNode created with an absolute value above the upper bound)";
 
-        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _prop);
+        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 1) << " (AbsParamNode created with an absolute value below the lower bound)";
 
-        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _prop);
+        generateAbsParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -73,7 +77,7 @@ namespace
 
         try
         {
-            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _prop);
+            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (AbsParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -81,7 +85,7 @@ namespace
 
         try
         {
-            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e40, 1e50, _prop);
+            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e40, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AbsParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -89,7 +93,7 @@ namespace
 
         try
         {
-            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _abs_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -101,7 +105,7 @@ namespace
     TEST_F(AbsParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+        _abs_test = std::make_shared<AbsParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_abs_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_add_node.cc b/tests/googletest/feature_creation/parameterization/test_add_node.cc
index bb5560ff51d13730ecb26c03dde2d87de2a422fa..3b8f5b26a2019ea88fb459383f90f2229784a355 100644
--- a/tests/googletest/feature_creation/parameterization/test_add_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_add_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::add(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(AddParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AddParamNode created with an absolute value above the upper bound)";
 
-        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (AddParamNode created with an absolute value below the lower bound)";
 
-        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateAddParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (AddParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _prop);
+            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (AddParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _add_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(AddParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _add_test = std::make_shared<AddParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_add_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_cb_node.cc b/tests/googletest/feature_creation/parameterization/test_cb_node.cc
index 8f74c5804dd300df0c689955db5d0c586d327ba0..12e7479fed9d4072ac67981636f5897525fd39a2 100644
--- a/tests/googletest/feature_creation/parameterization/test_cb_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_cb_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::cb(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(CbParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbParamNode created with an absolute value above the upper bound)";
 
-        generateCbParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateCbParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbParamNode created with an absolute value below the lower bound)";
 
-        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateCbParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (CbParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CbParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(CbParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<CbParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc b/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc
index b7f5345685c4ca17692d40f1b2f2f89e916df895..4eddb999b77bcd656b5e310f310be178f4121a0d 100644
--- a/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_cbrt_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::cbrt(900, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(CbrtParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbrtParamNode created with an absolute value above the upper bound)";
 
-        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (CbrtParamNode created with an absolute value below the lower bound)";
 
-        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateCbrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (CbrtParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CbrtParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _cbrt_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(CbrtParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _cbrt_test = std::make_shared<CbrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_cbrt_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_cos_node.cc b/tests/googletest/feature_creation/parameterization/test_cos_node.cc
index 94042cfc1bd66b39a816f61ef1930fc3221273c3..af43f0621bcf99214795165a0e80045b9f23893c 100644
--- a/tests/googletest/feature_creation/parameterization/test_cos_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_cos_node.cc
@@ -14,6 +14,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 3);
 
             _task_sizes_train = {900};
@@ -52,7 +54,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::cos(900, _phi[0]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -66,6 +68,7 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(CosParamNodeTest, GeneratorTest)
@@ -73,19 +76,19 @@ namespace
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
 
-        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _prop);
+        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created with an absolute value above the upper bound)";
 
-        generateCosParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created with an absolute value below the lower bound)";
 
-        generateCosParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created from CosNode)";
 
-        generateCosParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (CosParamNode created from SinNode)";
 
-        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _prop);
+        generateCosParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-5);
     }
@@ -96,7 +99,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -104,7 +107,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e49, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -112,7 +115,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created from CosNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -120,7 +123,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (CosParamNode created from SinNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -128,7 +131,7 @@ namespace
 
         try
         {
-            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _cos_test = std::make_shared<CosParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _cos_test->value_ptr(), 90), 1e-5);
         }
         catch(const InvalidFeatureException& e)
@@ -140,7 +143,7 @@ namespace
     TEST_F(CosParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _cos_test = std::make_shared<CosParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _cos_test = std::make_shared<CosParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_cos_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_div_node.cc b/tests/googletest/feature_creation/parameterization/test_div_node.cc
index 042312b6db4d9986ca6ab4a29af6a3d9c231d951..8ea61b9070ba520a5f10f51616da7b4e9334ae77 100644
--- a/tests/googletest/feature_creation/parameterization/test_div_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_div_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::div(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(DivParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (DivParamNode created with an absolute value above the upper bound)";
 
-        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (DivParamNode created with an absolute value below the lower bound)";
 
-        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateDivParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (DivParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (DivParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _div_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(DivParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _div_test = std::make_shared<DivParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_div_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_exp_node.cc b/tests/googletest/feature_creation/parameterization/test_exp_node.cc
index dcbcabb710fff98a4824c8350aeea9f85a955983..c9df2ce71b997d1898330710548b194d93b01ae0 100644
--- a/tests/googletest/feature_creation/parameterization/test_exp_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_exp_node.cc
@@ -15,6 +15,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -55,7 +57,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::exp(900, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -69,28 +71,29 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(ExpParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
-        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created with an absolute value above the upper bound)";
 
-        generateExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created with an absolute value below the lower bound)";
 
-        generateExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created from ExpNode)";
 
-        generateExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created from LogNode)";
 
-        generateExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (ExpParamNode created from NegExpNode)";
 
-        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -101,7 +104,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -109,7 +112,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -117,7 +120,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created from ExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -125,7 +128,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created from LogNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -133,7 +136,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (ExpParamNode created from NegExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -141,7 +144,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -153,7 +156,7 @@ namespace
     TEST_F(ExpParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<ExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_inv_node.cc b/tests/googletest/feature_creation/parameterization/test_inv_node.cc
index 932b39d210f8e2f92fafeaff7b4b56c8e01539f5..496303ae2da6a5ef503a1eab6977a34d473ed100 100644
--- a/tests/googletest/feature_creation/parameterization/test_inv_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_inv_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::inv(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(InvParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (InvParamNode created with an absolute value above the upper bound)";
 
-        generateInvParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateInvParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (InvParamNode created with an absolute value below the lower bound)";
 
-        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateInvParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (InvParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (InvParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _inv_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(InvParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _inv_test = std::make_shared<InvParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_inv_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_log_node.cc b/tests/googletest/feature_creation/parameterization/test_log_node.cc
index 294771b4734690c51bc8e87dd982a45e826bd80f..9196ec3b55c088304831acc0fa93a51e8a0cf829 100644
--- a/tests/googletest/feature_creation/parameterization/test_log_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_log_node.cc
@@ -55,7 +55,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::log(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -69,6 +69,7 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(LogParamNodeTest, GeneratorTest)
@@ -76,22 +77,22 @@ namespace
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
 
-        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created with an absolute value above the upper bound)";
 
-        generateLogParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created with an absolute value below the lower bound)";
 
-        generateLogParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created from ExpNode)";
 
-        generateLogParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created from LogNode)";
 
-        generateLogParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (LogParamNode created from NegExpNode)";
 
-        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateLogParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -102,7 +103,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -110,7 +111,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -118,7 +119,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created from ExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -126,7 +127,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created from LogNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -134,7 +135,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (LogParamNode created from NegExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -142,7 +143,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -154,7 +155,7 @@ namespace
     TEST_F(LogParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<LogParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_mult_node.cc b/tests/googletest/feature_creation/parameterization/test_mult_node.cc
index e2c65a8973dde30c34af5274b8eb42be926bee89..7343711b2a93be8ffa69277ba2658464b7d7e227 100644
--- a/tests/googletest/feature_creation/parameterization/test_mult_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_mult_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::mult(900, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(MultParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (MultParamNode created with an absolute value above the upper bound)";
 
-        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (MultParamNode created with an absolute value below the lower bound)";
 
-        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateMultParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (MultParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (MultParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _mult_test->value_ptr(), 900), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(MultParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _mult_test = std::make_shared<MultParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_mult_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc b/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc
index 3dd9c155ee78cffc9197b914fd05151e74247dad..cb9554e1051879e8da64935e7528ba8fe2372559 100644
--- a/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_neg_exp_node.cc
@@ -15,6 +15,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -55,7 +57,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::neg_exp(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -69,29 +71,31 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(NegExpParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
-        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created with an absolute value above the upper bound)";
 
-        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created with an absolute value below the lower bound)";
 
-        generateNegExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created from ExpNode)";
 
-        generateNegExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created from LogNode)";
 
-        generateNegExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[5], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (NegExpParamNode created from NegExpNode)";
 
-        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateNegExpParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
+
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
 
@@ -101,7 +105,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -109,7 +113,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -117,7 +121,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created from ExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -125,7 +129,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created from LogNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -133,7 +137,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[5], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (NegExpParamNode created from NegExpNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -141,7 +145,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -153,7 +157,7 @@ namespace
     TEST_F(NegExpParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<NegExpParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sin_node.cc b/tests/googletest/feature_creation/parameterization/test_sin_node.cc
index 8801de4799770f847da45c1ae25bfcce1992a16a..75857a13c90b7e515c3000fd4ac75840acd91d9a 100644
--- a/tests/googletest/feature_creation/parameterization/test_sin_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sin_node.cc
@@ -14,6 +14,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(900, 10, 2);
 
             _task_sizes_train = {900};
@@ -52,7 +54,7 @@ namespace
             _prop = std::vector<double>(900, 0.0);
             allowed_op_funcs::sin(900, _phi[0]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -66,6 +68,7 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SinParamNodeTest, GeneratorTest)
@@ -73,19 +76,19 @@ namespace
         int feat_ind = _phi.size();
         int phi_sz = _phi.size();
 
-        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _prop);
+        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created with an absolute value above the upper bound)";
 
-        generateSinParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[0], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created with an absolute value below the lower bound)";
 
-        generateSinParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[3], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created from CosNode)";
 
-        generateSinParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[4], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz) << " (SinParamNode created from SinNode)";
 
-        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _prop);
+        generateSinParamNode(_phi, _phi[0], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), phi_sz + 1) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 900), 1e-5);
     }
@@ -96,7 +99,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -104,7 +107,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e49, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -112,7 +115,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[3], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created from CosNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -120,7 +123,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[4], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SinParamNode created from SinNode)";
         }
         catch(const InvalidFeatureException& e)
@@ -128,7 +131,7 @@ namespace
 
         try
         {
-            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+            _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _sin_test->value_ptr(), 900), 1e-5);
         }
         catch(const InvalidFeatureException& e)
@@ -140,7 +143,7 @@ namespace
     TEST_F(SinParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _prop);
+        _sin_test = std::make_shared<SinParamNode>(_phi[0], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_sin_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc b/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc
index cbe2a311d8ae8c5f1a9d70ca02ff3fd80646aca7..27ae7390008d2c30c514549562f358149cb54ff6 100644
--- a/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_six_pow_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sixth_pow(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SixPowParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SixPowParamNode created with an absolute value above the upper bound)";
 
-        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SixPowParamNode created with an absolute value below the lower bound)";
 
-        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSixPowParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SixPowParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SixPowParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(SixPowParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<SixPowParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sq_node.cc b/tests/googletest/feature_creation/parameterization/test_sq_node.cc
index 943184d71fbc9af58be70aaa5642fe0b9eed834c..b2a0a1374b7e99bd3dc00f9d2ac693fd82c68f9d 100644
--- a/tests/googletest/feature_creation/parameterization/test_sq_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sq_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sq(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SqParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqParamNode created with an absolute value above the upper bound)";
 
-        generateSqParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSqParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqParamNode created with an absolute value below the lower bound)";
 
-        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSqParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SqParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SqParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _exp_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(SqParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _exp_test = std::make_shared<SqParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_exp_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc b/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc
index 211f977f839f0b058c47b3dff1a01fb8f767367f..b715e738b3cc19744f2db06df2227c6dde0dfda6 100644
--- a/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sqrt_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sqrt(90, _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,20 @@ namespace
 
         double _a;
         double _alpha;
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SqrtParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqrtParamNode created with an absolute value above the upper bound)";
 
-        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SqrtParamNode created with an absolute value below the lower bound)";
 
-        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSqrtParamNode(_phi, _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-4);
     }
@@ -83,7 +86,7 @@ namespace
 
         try
         {
-            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SqrtParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +94,7 @@ namespace
 
         try
         {
-            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _prop);
+            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e49, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SqrtParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +102,7 @@ namespace
 
         try
         {
-            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _sqrt_test->value_ptr(), 90), 1e-4);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +114,7 @@ namespace
     TEST_F(SqrtParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _sqrt_test = std::make_shared<SqrtParamNode>(_phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_sqrt_test->rung(), 1);
 
diff --git a/tests/googletest/feature_creation/parameterization/test_sub_node.cc b/tests/googletest/feature_creation/parameterization/test_sub_node.cc
index 0452e4b700993356d806430f7970c4751b31a806..fccb94dac36155b8a484c4e31d12c261f4920626 100644
--- a/tests/googletest/feature_creation/parameterization/test_sub_node.cc
+++ b/tests/googletest/feature_creation/parameterization/test_sub_node.cc
@@ -12,6 +12,8 @@ namespace
     protected:
         void SetUp() override
         {
+            nlopt_wrapper::MAX_PARAM_DEPTH = 1;
+
             node_value_arrs::initialize_values_arr(90, 10, 2);
 
             _task_sizes_train = {90};
@@ -47,7 +49,7 @@ namespace
             _prop = std::vector<double>(90, 0.0);
             allowed_op_funcs::sub(90, _phi[0]->value_ptr(), _phi[1]->value_ptr(), _alpha, _a, _prop.data());
 
-            nlopt_wrapper::set_objective("regression", _prop.data(), _task_sizes_train, 1, 1);
+            _optimizer = nlopt_wrapper::get_optimizer("regression", _task_sizes_train, _prop, 1);
         }
 
         node_ptr _feat_1;
@@ -60,19 +62,21 @@ namespace
 
         double _a;
         double _alpha;
+
+        std::shared_ptr<NLOptimizer> _optimizer;
     };
 
     TEST_F(SubParamNodeTest, GeneratorTest)
     {
         int feat_ind = _phi.size();
 
-        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SubParamNode created with an absolute value above the upper bound)";
 
-        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _prop);
+        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e49, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 2) << " (SubParamNode created with an absolute value below the lower bound)";
 
-        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        generateSubParamNode(_phi, _phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
         EXPECT_EQ(_phi.size(), 3) << " (Failure to create a valid feature)";
         EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _phi.back()->value_ptr(), 90), 1e-10);
     }
@@ -83,7 +87,7 @@ namespace
 
         try
         {
-            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _prop);
+            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e-40, _optimizer);
             EXPECT_TRUE(false) << " (SubParamNode created with an absolute value above the upper bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -91,7 +95,7 @@ namespace
 
         try
         {
-            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _prop);
+            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e3, 1e50, _optimizer);
             EXPECT_TRUE(false) << " (SubParamNode created with an absolute value below the lower bound)";
         }
         catch(const InvalidFeatureException& e)
@@ -99,7 +103,7 @@ namespace
 
         try
         {
-            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+            _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
             EXPECT_LT(1.0 - util_funcs::r2(_prop.data(), _sub_test->value_ptr(), 90), 1e-10);
         }
         catch(const InvalidFeatureException& e)
@@ -111,7 +115,7 @@ namespace
     TEST_F(SubParamNodeTest, AttributesTest)
     {
         int feat_ind = _phi.size();
-        _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _prop);
+        _sub_test = std::make_shared<SubParamNode>(_phi[0], _phi[1], feat_ind, 1e-50, 1e50, _optimizer);
 
         EXPECT_EQ(_sub_test->rung(), 1);
 
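The C++ tests above exercise the same bounds check through two paths: the free generate*ParamNode functions silently drop a candidate whose values fall outside [l_bound, u_bound], leaving _phi.size() unchanged, while direct construction of the node throws InvalidFeatureException. A condensed sketch of the two idioms, under the same assumptions as the fixture sketch above:

    int feat_ind = phi.size();

    // Generator path: an out-of-bounds candidate is simply not appended,
    // so phi.size() is unchanged afterwards.
    generateSqParamNode(phi, phi[1], feat_ind, 1e-50, 1e-40, optimizer);

    // Constructor path: the same violation surfaces as an exception.
    try
    {
        auto node = std::make_shared<SqParamNode>(phi[1], feat_ind, 1e-50, 1e-40, optimizer);
    }
    catch(const InvalidFeatureException& e)
    {
        // Expected: the candidate's values exceed the 1e-40 upper bound.
    }
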
diff --git a/tests/pytest/test_descriptor_identifier/test_regressor.py b/tests/pytest/test_descriptor_identifier/test_regressor.py
index 72faf818ea474d8ff817ac8a84f30390f0c1bde9..5b99f7ae5f8746b9d80aafb91353fe103928a693 100644
--- a/tests/pytest/test_descriptor_identifier/test_regressor.py
+++ b/tests/pytest/test_descriptor_identifier/test_regressor.py
@@ -36,7 +36,7 @@ def test_sisso_regressor():
 
     op_set = ["add", "sub", "mult", "sq", "cb", "sqrt", "cbrt"]
 
-    feat_space = generate_fs(phi_0, prop, [90], op_set, [], "regression", 2, 10)
+    feat_space = generate_fs(phi_0, prop, [95], op_set, [], "regression", 2, 10)
 
     sisso = SISSORegressor(
         feat_space,
diff --git a/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py b/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py
index cf9bf8674c309ac9ab20d5b406fad1063af809fe..d4d49c5aa8c5c4380f66c840627adcf365187d2b 100644
--- a/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py
+++ b/tests/pytest/test_feature_creation/test_feature_space/test_feature_space.py
@@ -9,7 +9,6 @@ from cpp_sisso import (
 
 
 def test_feature_space():
-    print("in")
     initialize_values_arr(90, 10, 10)
     phi_0 = [
         FeatureNode(
@@ -26,15 +25,11 @@ def test_feature_space():
 
     op_set = ["add", "sub", "mult", "sq", "cb", "sqrt", "cbrt"]
 
-    print("feat spac")
     feat_space = generate_fs(phi_0, prop, [90], op_set, [], "regression", 2, 10)
-    print("sis")
     feat_space.sis(prop)
 
-    print("rm")
     shutil.rmtree("feature_space/")
 
-    print("assert")
     assert feat_space.phi_selected[0].postfix_expr == "1|0|add|sq"
 
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py b/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py
index 0c6fb9a5f87ac3e56ff75d5878816df11d5b66f9..9da86fd82e730ea2ed399cb5d87149dbd5210f7c 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_lorentizan.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     SqNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 import pandas as pd
 import numpy as np
@@ -21,17 +21,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_lorentzian():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.linspace(-20.023658, 20.23658, 900)
     test_data_1 = np.linspace(-19.98549, 19.08, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = 21.4 / ((data_1 - 0.25) ** 2.0 + 7.1) - 1.478
-    set_objective("regression", prop, [900], 2, 2)
+    optimizer = get_reg_optimizer([900], prop, 2, 2, 0.5)
 
     feat_node = InvParamNode(SqNode(feat_1, 2, 1e-50, 1e50), 3, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
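Starting with the Lorentzian test above and running through every test_param_* file below, the Python-side migration is uniform: the cpp_sisso import swaps set_objective for get_reg_optimizer, each test builds one optimizer up front, and feat_node.get_parameters(optimizer) replaces feat_node.get_parameters(prop). Every call follows get_reg_optimizer(task_sizes, prop, max_rung, max_param_depth, 0.5); the meaning of the trailing 0.5 is not visible in this diff. The third argument to initialize_values_arr is also tightened from a blanket 4 to the number of primary features each test actually creates.
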
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py
index 5a66f2f897b21e50eb99c43fef485a2f97aab14f..0c7dc69dd038e1b8aca1b4b0f7a38cdf6892609c 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     AbsParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,18 +20,18 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_abs_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.linspace(-20, 20, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -2.3 * np.abs(1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = AbsParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py
index 52b3474b920a31a82e14d965165544151d03958d..a46b90338e20fdce0b82a0094fdb817fa2b1ea07 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_abs_diff.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     AbsDiffParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,7 +20,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_abs_diff_node():
-    initialize_values_arr(900, 100, 4)
+    initialize_values_arr(900, 100, 2)
 
     data_1 = np.linspace(-20, 20, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 100)
@@ -31,10 +31,10 @@ def test_param_abs_diff_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = -2.3 * np.abs(data_1 - (1.5 * data_2 + 0.8751)) - 1.2
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = AbsDiffParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py
index 12cc528e434aab710f9d8a5880ba73bdb903938a..b9f02ae7b9cc8d9ac36998adb39bbd325096d48f 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_add.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     AddParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -19,7 +19,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_add_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 2)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
@@ -30,10 +30,10 @@ def test_param_add_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = -2.3 * (data_1 + 1.5 * data_2) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = AddParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py
index ed48ef11d050aa5de43279c8a1c64d726f2d3067..ff823052e34fe2e7ab57128dc74e0ad0682ac102 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_cb.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     CbParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_cb_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.power(1.55 * data_1 + 0.8751, 3.0) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = CbParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py
index 0ce5249ca2ca95b954328eaaec30dbca8ff02b8a..3e19cd2f2312cd330c743a46b97f1fc7e881dceb 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_cbrt.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     CbrtParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_cbrt_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.cbrt(1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = CbrtParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py
index 224a64d9f71f27fa312b678989fddecfd716cb67..7a8762e6c46b7b674136fa209870d5c97e1feaa2 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_cos.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     CosParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -27,10 +27,10 @@ def test_param_cos_node():
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -1.1 * np.cos(1.25 * data_1 + 2.13) + 0.01578
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = CosParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py
index 0a53c8290c35e458cd4877a21168d2bd1c589cda..b07f0d6a4f9dee02142072a7a6fb171d72844d3e 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_div.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     DivParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,7 +20,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_div_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 2)
 
     data_1 = np.random.uniform(-2.50, 2.50, 900)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
@@ -31,10 +31,10 @@ def test_param_div_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = 4.124 * data_1 / ((data_2 + 1.8751)) - 0.12
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = DivParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py
index 8a6123f7936a09733ff7dc40d03d705a4bf71939..782ec3201859d56d9c2d5ed07a23dcc5dbf80c38 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_exp.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     ExpParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_exp_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.random.uniform(-2.50, 2.50, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.exp(1.32 * data_1 + 0.8751) - 0.12
-    set_objective("regression", prop, [900], 10, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = ExpParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py
index bc5a581b8acd0103784d22aac5237c07397a20f6..eae398a40737ca8317d63b729d6ed9823e761cc2 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_inv.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     InvParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_inv_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(1.0, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = 1.0 / (1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = InvParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py
index 47a4cb756f7d06ef1a40724afe33c975438c1890..46dd83279837c987d4a745fce1136e2a0aa47e4c 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_log.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     LogParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_log_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -2.014 * np.log(1.15 * data_1 + 0.1387)
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = LogParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py
index 82286f75a56977b2c00e4d9f0575f584803665d0..f4044c5be5d740e5b0c5cfe1bdaca832d50edf68 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_neg_exp.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     NegExpParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,18 +20,18 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_neg_exp_node():
-    initialize_values_arr(900, 10, 4)
+    initialize_values_arr(900, 10, 1)
 
     data_1 = np.random.uniform(-2.5, 2.5, 900)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
-    prop = np.exp(-1.55 * data_1 + 0.8751) - 0.12
-    set_objective("regression", prop, [900], 1, 1)
+    prop = np.exp(-1.55 * data_1 - 0.8751) - 0.12
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = NegExpParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py
index 4ea5bb80a688a6b6d5240b23e797baa55c669709..0cf5e92d28437db02374e429c720f0635fc33761 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sin.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SinParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -27,10 +27,10 @@ def test_param_sin_node():
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = -1.1 * np.sin(1.25 * data_1 + 2.13) + 0.01578
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = SinParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py
index 69e5bb38a8a78ce0676c88f163119ac5e5377844..ad9667c53fe33449d96686ed6a22bc89e656f5aa 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_six_pow.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SixPowParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -27,10 +27,10 @@ def test_param_six_pow_node():
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = 1.55 * np.power(data_1 + 0.21, 6.0) - 0.12
-    set_objective("regression", prop, [900], 1, 1)
+    optimizer = get_reg_optimizer([900], prop, 1, 1, 0.5)
 
     feat_node = SixPowParamNode(feat_1, 1, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py
index 77084b49529b7e1f6d03b9eb4722d3d5a3ac6b84..eafecb192eacdd6cdd73ebb40b8eb54c26a493e5 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sq.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SqParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_sq_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.power(1.55 * data_1 + 0.8751, 2.0) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = SqParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py
index 9eb621c48974fa70bbb5328ebcc93b796fe680d9..9602d4b711fd5d38591ecf7a45f28d7808e85fa3 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sqrt.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SqrtParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -20,17 +20,17 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_sqrt_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 1)
 
     data_1 = np.linspace(0.5, 20, 90)
     test_data_1 = np.linspace(0.52145, 19.99, 10)
     feat_1 = FeatureNode(0, "t_a", data_1, test_data_1, Unit())
 
     prop = np.sqrt(1.55 * data_1 + 0.8751) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = SqrtParamNode(feat_1, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
diff --git a/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py b/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py
index 93715635e6ca3d74de3a25afea99034dc6a87bc7..02a26979e79fd42348c1a063a55c97b0af97d24a 100644
--- a/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py
+++ b/tests/pytest/test_feature_creation/test_parameterize/test_param_sub.py
@@ -3,7 +3,7 @@ from cpp_sisso import (
     SubParamNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -19,7 +19,7 @@ def check_feat_parmeters(feat, prop):
 
 
 def test_param_sub_node():
-    initialize_values_arr(90, 10, 4)
+    initialize_values_arr(90, 10, 2)
 
     data_1 = np.linspace(-20, 20, 90)
     test_data_1 = np.linspace(-19.99, 19.99, 10)
@@ -30,10 +30,10 @@ def test_param_sub_node():
     feat_2 = FeatureNode(1, "x_a", data_2, test_data_2, Unit())
 
     prop = -2.3 * (data_1 - 1.5 * data_2) - 1.2
-    set_objective("regression", prop, [90], 1, 1)
+    optimizer = get_reg_optimizer([90], prop, 1, 1, 0.5)
 
     feat_node = SubParamNode(feat_1, feat_2, 2, 1e-50, 1e50)
-    feat_node.get_parameters(prop)
+    feat_node.get_parameters(optimizer)
 
     assert check_feat_parmeters(feat_node, prop)
 
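In the model-eval tests below the optimizer is built against np.zeros(90): it exists to satisfy the parameterized-node API rather than to fit anything, since these tests check model evaluation, not parameter fitting. The hunks where the local optimizer is consumed fall outside the context shown here.
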
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py
index 5796af4ecf940b51f26aa93651e162ff2992d5e6..7ab782dfd993a58f36f8945fa54e3ea9fdc7e768 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_abs_diff_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_abs_diff_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py
index 8256cb2dbaa07bcc2806deb6cfafac2b9b58ceaf..4226c71e5a5bb12d2db0a9cd9dc146da5b15f8a4 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_abs_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_abs_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py
index dfb04a64c2ea005630d1a94ba6b26096e87bafd7..0c79638e9c783a9a0123ed4e0f1a9dc6c0104def 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_add_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_add_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py
index 22c9da78db38f8fd6935ce89e4c0d74685cbf31a..8ddd0bc15fb89edc566f5798e4aed631570064db 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_binary_binary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_bin_bin_model_eval():
     initialize_values_arr(90, 10, 3)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py
index fc84e14803a711093574dc507ebd36cf90151da3..240d3478dfad19e7bffcbe1efab53e795ca8bb55 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_binary_unary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_bin_un_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py
index 2be9338eeec9bb03b5d2bc302f88d53a7e7e7a2f..de176c9327d702bcf6a3953d1d449ca6068b0add 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_cb_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_cb_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py
index 827e7e37bd58ce7086b75135744a75b49c08ae70..55d007795075f31d048b8a09fc8c5f55bcb87566 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_cbrt_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_cbrt_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py
index 97b0acefd3f35586d1b7b08476f7fd9144ade21b..54d6de3e3b257eac05b42b819e36fd78012e9ee7 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_cos_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_cos_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py
index 44380b6393885ef890d071a87b55655280ea0ef6..11a78d9ec9e312e236d8f8ac5cb18e3723ed33c0 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_div_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_div_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py
index 6a46701d8ee9a93b6e02d4823143dc75b202b9ca..e2ee2581a230361eecd5917320ca894f6beaba75 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_exp_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_exp_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e1 - 1e1
     test_data_1 = np.random.random(10) * 2e1 - 1e1
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py
index 2061d475628992729437bb6fb714963a439403fe..d770359bd69714205af534454fa4ba9a1e2c3610 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_inv_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_inv_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py
index 8e1bea4612d36f7c75499b8e28ed10f5dd4a13bf..e2b5cf925fbd441901833b35cb4fd9cf0fb2c74b 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_log_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_log_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py
index f8f28abbe66952975a00ddf7950cb9d8366c3c0c..9223130e4255d68923860f0ac3f6410606e9a06e 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_mult_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_mult_param_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py
index 9ba26d6a62fa0667285506432ac0d271af6bc358..c2153735fa5fa9394b4561401344cc08f54b88c9 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_neg_exp_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_neg_exp_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e1 - 1e1
     test_data_1 = np.random.random(10) * 2e1 - 1e1
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py
index 94a8132717e091d25533018aead47e9f77fc2ccc..bd3be3b65eeb4457f040dd24896fcb6cff8730c2 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sin_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sin_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py
index fe66ad0c0bf52dce24d5f206ca9532ab44756ac1..c169fc8c56bc20aae26747eec13819c5b6182d8d 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_six_pow_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_six_pow_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py
index 883a70fc39d574b9a93bc14ebf31bd92d7c10c0d..274602024834e5a16d51f18ff0feef14ddfa2ca0 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sq_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sq_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 2e4 - 1e4
     test_data_1 = np.random.random(10) * 2e4 - 1e4
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py
index 8769bfda8b552b37454dfbf5e491b53d79fe66ac..65de6dc0c14906b192b95b287271388c45c1c999 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sqrt_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sqrt_param_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py
index a427b4e48844cae6471f63ecd1aedb274274d11a..b6d7895251912b1140f1a862d56f6d76979a274a 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_sub_param.py
@@ -4,7 +4,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -16,7 +16,7 @@ class InvalidFeatureMade(Exception):
 
 def test_sub_parm_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py
index c25947133585ed1e29f713146bfcc1ba68aac25b..f3e2f003c951051d894e06996353b9cb346a8022 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_unary_binary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_un_bin_model_eval():
     initialize_values_arr(90, 10, 2)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
diff --git a/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py b/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py
index a0024a1e87bd2e84c233d5df06e8072f86405cf6..626ce2fd58bec88442387d7e6a7a0891c3c5173d 100644
--- a/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py
+++ b/tests/pytest/test_model_eval/test_param_model_node/test_unary_unary_param.py
@@ -5,7 +5,7 @@ from cpp_sisso import (
     ModelNode,
     Unit,
     initialize_values_arr,
-    set_objective,
+    get_reg_optimizer,
 )
 
 import numpy as np
@@ -17,7 +17,7 @@ class InvalidFeatureMade(Exception):
 
 def test_un_un_model_eval():
     initialize_values_arr(90, 10, 1)
-    set_objective("regression", np.zeros(90), [90], 2, 2)
+    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)
 
     data_1 = np.random.random(90) * 1e4 + 1e-10
     test_data_1 = np.random.random(10) * 1e4 + 1e-10
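
Note on the hunks above: every parameterized-model test drops the removed global-state call set_objective("regression", np.zeros(90), [90], 2, 2) and instead constructs a per-test optimizer handle with get_reg_optimizer. A minimal sketch of the new setup pattern follows, grounded only in the calls these hunks add; the interpretation of the positional arguments (per-task sample counts, property vector, maximum rung, maximum parameterization depth, and a trailing scale factor of 0.5) is an inference from the surrounding changes, not documented API:

    import numpy as np
    from cpp_sisso import initialize_values_arr, get_reg_optimizer

    # Reserve value storage: 90 training samples, 10 test samples,
    # and one primary-feature slot (tests that combine two features pass 2).
    initialize_values_arr(90, 10, 1)

    # Old (removed) global-state call:
    #     set_objective("regression", np.zeros(90), [90], 2, 2)
    # New handle-based call; argument meanings below are assumptions:
    #   [90]          -> per-task sample counts
    #   np.zeros(90)  -> property (target) vector
    #   2, 2          -> maximum rung and maximum parameterization depth
    #   0.5           -> assumed scale factor used by the optimizer's loss
    optimizer = get_reg_optimizer([90], np.zeros(90), 2, 2, 0.5)

    # The returned handle is then threaded explicitly into whatever
    # parameterization call each test exercises, replacing the implicit
    # global objective the old API set up.
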