diff --git a/src/feature_creation/feature_space/FeatureSpace.cpp b/src/feature_creation/feature_space/FeatureSpace.cpp
index b791dc3b46fe297ee6b236072e859f58d15963dd..98d1b362ad6be13ee1266922edd645a558d846e8 100644
--- a/src/feature_creation/feature_space/FeatureSpace.cpp
+++ b/src/feature_creation/feature_space/FeatureSpace.cpp
@@ -692,7 +692,9 @@ void FeatureSpace::project_generated(const double* prop, const int size, std::ve
         int index_base = _phi.size() + _n_sis_select * (omp_get_thread_num() + _mpi_comm->size());
 
         #ifdef PARAMETERIZE
-        std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
+        std::vector<double> prop_vec(size, 0.0);
+        std::copy_n(prop, size, prop_vec.data());
+        std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, prop_vec, _max_phi, _max_param_depth);
         #endif
 
         #pragma omp for schedule(dynamic)
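
Note on the call above: project_generated can now hand the optimizer several property/residual vectors at once, flattened into one buffer of size doubles with each property occupying a contiguous block of n_samp entries. A minimal sketch of that assumed layout (the helper name flatten_props is illustrative, not part of the code base):

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative only: property pp occupies [pp * n_samp, (pp + 1) * n_samp) in the
// flat buffer handed to nlopt_wrapper::get_optimizer.
std::vector<double> flatten_props(const std::vector<std::vector<double>>& props, int n_samp)
{
    std::vector<double> flat(props.size() * n_samp, 0.0);
    for(std::size_t pp = 0; pp < props.size(); ++pp)
    {
        std::copy_n(props[pp].data(), n_samp, flat.data() + pp * n_samp);
    }
    return flat;
}
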
diff --git a/src/nl_opt/NLOptWrapper.cpp b/src/nl_opt/NLOptWrapper.cpp
index 8747c138dfcec80b682512d1154a8cdddfb0086e..9a86b1fc2b85cb173be4337ace620c00b498aca6 100644
--- a/src/nl_opt/NLOptWrapper.cpp
+++ b/src/nl_opt/NLOptWrapper.cpp
@@ -22,17 +22,18 @@ NLOptimizer::NLOptimizer(
     _task_sizes(task_sizes),
     _n_samp(std::accumulate(task_sizes.begin(), task_sizes.end(), 0.0)),
     _n_rung(n_rung),
+    _n_prop(prop.size() / _n_samp),
     _max_params(2 * task_sizes.size()),
     _max_param_depth(std::min(n_rung, max_param_depth)),
     _local_opt_alg(local_opt_alg)
 {
-    if(prop.size() != _n_samp)
+    if(prop.size() != _n_samp * _n_prop)
     {
         throw std::logic_error(
             "Property vector size (" +
             std::to_string(_prop.size()) +
-            ") and number of samples (" +
-            std::to_string(_n_samp) +
+            ") and number of samples times number of properties (" +
+            std::to_string(_n_samp * _n_prop) +
             ") are not the same"
         );
     }
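
The constructor now infers the number of stacked property vectors by integer division, so the check above fires exactly when prop.size() is not a whole multiple of the number of samples. A standalone sketch of that logic (the function name infer_n_prop is hypothetical):

#include <stdexcept>
#include <vector>

int infer_n_prop(const std::vector<double>& prop, int n_samp)
{
    const int n_prop = static_cast<int>(prop.size()) / n_samp;  // integer division
    if(static_cast<int>(prop.size()) != n_samp * n_prop)
    {
        throw std::logic_error("Property vector size is not a multiple of the number of samples");
    }
    return n_prop;
}
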
@@ -84,9 +85,13 @@ NLOptimizerClassification::NLOptimizerClassification(
         max_param_depth,
         reset_max_param_depth,
         local_opt_alg
-    )
+    ),
+    _convex_hull(_n_prop)
 {
-    _convex_hull = std::make_shared<ConvexHull1D>(task_sizes, _prop.data());
+    for(int pp = 0; pp < _n_prop; ++pp)
+    {
+        _convex_hull[pp] = std::make_shared<ConvexHull1D>(task_sizes, &_prop[_n_samp * pp]);
+    }
 }
 
 NLOptimizerRegression::NLOptimizerRegression(
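
The classification optimizer now keeps one ConvexHull1D per property block instead of a single hull, each built from the contiguous slice of _prop belonging to that property. A hedged sketch of the same construction pattern with a stand-in hull type (only the two-argument constructor used above is assumed):

#include <memory>
#include <vector>

// Stand-in for ConvexHull1D; only the (task_sizes, prop pointer) constructor is assumed.
struct Hull
{
    Hull(const std::vector<int>&, const double*) {}
};

std::vector<std::shared_ptr<Hull>> build_hulls(
    const std::vector<int>& task_sizes, const std::vector<double>& prop, int n_samp)
{
    const int n_prop = static_cast<int>(prop.size()) / n_samp;
    std::vector<std::shared_ptr<Hull>> hulls(n_prop);
    for(int pp = 0; pp < n_prop; ++pp)
    {
        // Each hull sees only its own contiguous block of n_samp property values
        hulls[pp] = std::make_shared<Hull>(task_sizes, prop.data() + pp * n_samp);
    }
    return hulls;
}
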
@@ -109,7 +114,7 @@ NLOptimizerRegression::NLOptimizerRegression(
         local_opt_alg
     ),
     _feature_gradient(_max_params * _n_samp, 0.0),
-    _residuals(_n_samp, 0.0),
+    _residuals(_n_samp * _n_prop, 0.0),
     _cauchy_scaling(cauchy_scaling * cauchy_scaling)
 {}
 
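
The residual buffer grows to n_samp * n_prop so each property keeps its own block of per-sample residuals; the objectives below sum one block at a time. A small illustrative helper for that indexing (not part of the patch):

#include <numeric>
#include <vector>

// Illustrative only: total residual of property pp, summed over its contiguous
// block in a buffer of size n_samp * n_prop (mirrors the accumulate calls below).
double total_residual(const std::vector<double>& residuals, int n_samp, int pp)
{
    return std::accumulate(
        residuals.begin() + pp * n_samp,
        residuals.begin() + (pp + 1) * n_samp,
        0.0
    );
}
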
@@ -181,26 +186,39 @@ double NLOptimizer::optimize_feature_params(Node* feat)
     double* val_ptr = feat->value_ptr(_params.data() + _task_sizes.size() * 2);
 
     // Make a copy of the property vector to use in dgels
-    std::copy_n(_prop.data(), _n_samp, _prop_copy.data());
-    for(int tt = 0; tt < _task_sizes.size(); ++tt)
+    if(type() != DI_TYPE::CLASS)
     {
-        // Create the a matrix for dgels
-        std::fill_n(_a.data(), _a.size(), 1.0);
-        std::copy_n(val_ptr + start, _task_sizes[tt], _a.data());
-
-        // Solve for the best initial b/c for this task and update the parameter vector
-        int info = 0;
-        dgels_('N', _task_sizes[tt], 2, 1, _a.data(), _task_sizes[tt], &_prop_copy[start], _task_sizes[tt], _work.data(), _work.size(), &info);
-        if(info == 0)
+        std::copy_n(_prop.data(), _prop.size(), _prop_copy.data());
+        // Find the property vector this feature correlates with best (highest R^2)
+        int best_prop_ind = 0;
+        double max_r2 = util_funcs::r2(val_ptr, _prop.data(), _task_sizes);
+        for(int pp = 1; pp < _n_prop; ++pp)
         {
-            _params[tt * 2] = util_funcs::round2(_prop_copy[start], 33);
-            _params[tt * 2 + 1] = util_funcs::round2(_prop_copy[start + 1], 33);
+            double r2 = util_funcs::r2(val_ptr, _prop.data() + pp * _n_samp, _task_sizes);
+            if(r2 > max_r2)
+            {
+                max_r2 = r2;
+                best_prop_ind = pp;
+            }
         }
+        for(int tt = 0; tt < _task_sizes.size(); ++tt)
+        {
+            // Create the a matrix for dgels
+            std::fill_n(_a.data(), _a.size(), 1.0);
+            std::copy_n(val_ptr + start, _task_sizes[tt], _a.data());
+
+            // Solve for the best initial b/c for this task and update the parameter vector
+            int info = 0;
+            dgels_('N', _task_sizes[tt], 2, 1, _a.data(), _task_sizes[tt], &_prop_copy[best_prop_ind * _n_samp + start], _task_sizes[tt], _work.data(), _work.size(), &info);
+            if(info == 0)
+            {
+                _params[tt * 2] = util_funcs::round2(_prop_copy[best_prop_ind * _n_samp + start], 33);
+                _params[tt * 2 + 1] = util_funcs::round2(_prop_copy[best_prop_ind * _n_samp + start + 1], 33);
+            }
 
-        // Go to the next task
-        start += _task_sizes[tt];
+            // Go to the next task
+            start += _task_sizes[tt];
+        }
     }
-
     std::transform(
         _lb_init.begin(),
         _lb_init.end(),
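
For regression-type fits the initial slope/intercept guess still comes from a linear least-squares solve with dgels, but it is now run only against the property vector the feature correlates with best by R^2. A self-contained sketch of that initial guess for one task segment, using the closed-form simple linear regression in place of LAPACK (purely illustrative; it assumes a non-constant feature):

#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>

// Illustrative only: slope/intercept of prop = a * feat + b, standing in for the
// dgels call used above.
std::pair<double, double> initial_slope_intercept(
    const std::vector<double>& feat, const std::vector<double>& prop)
{
    const double n = static_cast<double>(feat.size());
    const double mean_f = std::accumulate(feat.begin(), feat.end(), 0.0) / n;
    const double mean_p = std::accumulate(prop.begin(), prop.end(), 0.0) / n;

    double cov = 0.0;
    double var = 0.0;
    for(std::size_t ii = 0; ii < feat.size(); ++ii)
    {
        cov += (feat[ii] - mean_f) * (prop[ii] - mean_p);
        var += (feat[ii] - mean_f) * (feat[ii] - mean_f);
    }
    const double slope = cov / var;
    return {slope, mean_p - slope * mean_f};
}
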
@@ -280,35 +298,76 @@ double NLOptimizer::optimize_feature_params(Node* feat)
 double nlopt_wrapper::objective_class(unsigned int n, const double* p, double* grad, void* data)
 {
     feat_data* d = (feat_data*) data;
-    return d->_optimizer->convex_hull()->overlap_1d(d->_feat->value_ptr(p));
+    double min_overlap = HUGE_VAL;
+    for(int pp = 0; pp < d->_optimizer->n_prop(); ++pp)
+    {
+        double overlap = d->_optimizer->convex_hull(pp)->overlap_1d(d->_feat->value_ptr(p));
+        if(std::isfinite(overlap) && (overlap < min_overlap))
+        {
+            min_overlap = overlap;
+        }
+    }
+    return min_overlap;
 }
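
objective_class now scores a parameter set by the smallest finite convex-hull overlap across all property vectors, so a feature only needs to separate one of the supplied properties well. A minimal stand-alone sketch of that reduction (the function name is illustrative):

#include <cmath>
#include <vector>

// Illustrative only: smallest finite value among per-property overlaps; returns
// HUGE_VAL when none are finite, matching the objective above.
double min_finite_overlap(const std::vector<double>& overlaps)
{
    double min_overlap = HUGE_VAL;
    for(double overlap : overlaps)
    {
        if(std::isfinite(overlap) && (overlap < min_overlap))
        {
            min_overlap = overlap;
        }
    }
    return min_overlap;
}
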
 double nlopt_wrapper::objective_reg(unsigned int n, const double* p, double* grad, void* data)
 {
     feat_data* d = (feat_data*) data;
     int n_task = d->_optimizer->task_sizes().size();
     int n_samp = d->_optimizer->n_samp();
+    int n_prop = d->_optimizer->n_prop();
     double ci = d->_optimizer->cauchy_scaling();
 
     double* val_ptr = d->_feat->value_ptr(p + 2 * n_task);
 
     int start = 0;
-    for(int tt = 0; tt < n_task; ++tt)
+    for(int pp = 0; pp < n_prop; ++pp)
     {
-        // Calculate the residual
-        std::transform(
-            val_ptr + start,
-            val_ptr + d->_optimizer->task_sizes()[tt] + start,
-            d->_prop + start,
-            d->_optimizer->residuals(start),
-            [p, tt, ci, &n_samp](double vp, double prop){
-                return (
-                    ci / n_samp *
-                    std::log(1 + std::pow(prop - (vp * p[2*tt] + p[2*tt + 1]), 2.0) / ci)
-                );
-            }
-        );
+        for(int tt = 0; tt < n_task; ++tt)
+        {
+            // Calculate the residual; val_ptr holds only n_samp feature values,
+            // so strip the property offset that start has accumulated
+            std::transform(
+                val_ptr + (start - pp * n_samp),
+                val_ptr + (start - pp * n_samp) + d->_optimizer->task_sizes()[tt],
+                d->_prop + start,
+                d->_optimizer->residuals(start),
+                [p, tt, ci, &n_samp](double vp, double prop){
+                    return (
+                        ci / n_samp *
+                        std::log(1 + std::pow(prop - (vp * p[2*tt] + p[2*tt + 1]), 2.0) / ci)
+                    );
+                }
+            );
+            start += d->_optimizer->task_sizes()[tt];
+        }
     }
 
+    int min_prop_ind = 0;
+    double min_res = util_funcs::round2(
+        std::accumulate(
+            d->_optimizer->residuals(0),
+            d->_optimizer->residuals(n_samp),
+            0.0,
+            [](double total, double res){return total + util_funcs::round2(res, 33);}
+        ),
+        33
+    );
+    for(int pp = 1; pp < n_prop; ++pp)
+    {
+        double res = util_funcs::round2(
+            std::accumulate(
+                d->_optimizer->residuals(pp * n_samp),
+                d->_optimizer->residuals((pp + 1) * n_samp),
+                0.0,
+                [](double total, double res){return total + util_funcs::round2(res, 33);}
+            ),
+            33
+        );
+        if((std::isfinite(res) && (res < min_res)) || !std::isfinite(min_res))
+        {
+            min_prop_ind = pp;
+            min_res = res;
+        }
+    }
     // Calculate the gradient if requested
     if(grad)
     {
@@ -317,9 +376,9 @@ double nlopt_wrapper::objective_reg(unsigned int n, const double* p, double* gra
         {
             // Calculate the difference between y and f(p, x)
             std::transform(
                 val_ptr + start,
                 val_ptr + d->_optimizer->task_sizes()[tt] + start,
-                d->_prop + start,
+                d->_prop + start + min_prop_ind * n_samp,
                 d->_optimizer->feature_gradient((2 * tt + 1)*n_samp + start),
                 [p, tt](double vp, double prop){
                     return prop - (vp * p[2*tt] + p[2*tt + 1]);
@@ -338,8 +397,8 @@ double nlopt_wrapper::objective_reg(unsigned int n, const double* p, double* gra
 
             // \partial s_i/\partial \alpha_task = \partial s_i/\partial a_task * f_i
             std::transform(
                 val_ptr + start,
                 val_ptr + start + d->_optimizer->task_sizes()[tt],
                 d->_optimizer->feature_gradient((2 * tt + 1) * n_samp + start),
                 d->_optimizer->feature_gradient((2 * tt) * n_samp + start),
                 [](double vp, double s){
@@ -387,15 +446,7 @@ double nlopt_wrapper::objective_reg(unsigned int n, const double* p, double* gra
             );
         }
     }
-    return util_funcs::round2(
-        std::accumulate(
-            d->_optimizer->residuals(0),
-            d->_optimizer->residuals(n_samp),
-            0.0,
-            [](double total, double res){return total + util_funcs::round2(res, 33);}
-        ),
-        33
-    );
+    return min_res;
 }
 
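
In objective_reg each property block is filled with per-sample Cauchy (Lorentzian) losses, the property with the smallest rounded total is selected, and only that property drives the gradient and the returned value. A sketch of the per-sample term used in the lambdas above (parameter names are illustrative):

#include <cmath>

// Illustrative only: per-sample Cauchy loss for the model prop = alpha * feat + a,
// with scaling ci, as accumulated into the residual buffer above.
double cauchy_loss(double feat_val, double prop_val, double alpha, double a, double ci, int n_samp)
{
    const double diff = prop_val - (alpha * feat_val + a);
    return ci / n_samp * std::log(1.0 + diff * diff / ci);
}
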
 double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double* grad, void* data)
@@ -403,27 +454,53 @@ double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double*
     feat_data* d = (feat_data*) data;
     int n_task = d->_optimizer->task_sizes().size();
     int n_samp = d->_optimizer->n_samp();
+    int n_prop = d->_optimizer->n_prop();
+
     double ci = d->_optimizer->cauchy_scaling();
 
     double* val_ptr = d->_feat->value_ptr(p + 2 * n_task);
 
     std::fill_n(d->_optimizer->feature_gradient(0), n * n_samp, 0.0);
     int start = 0;
-    for(int tt = 0; tt < n_task; ++tt)
+    for(int pp = 0; pp < n_prop; ++pp)
     {
-        // Calculate the residual
-        std::transform(
-            val_ptr + start,
-            val_ptr + d->_optimizer->task_sizes()[tt] + start,
-            d->_prop + start,
-            d->_optimizer->residuals(start),
-            [p, tt, ci, &n_samp](double vp, double prop){
-                return (
-                    ci / n_samp *
-                    std::log(1 + std::pow(prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]), 2.0) / ci)
-                );
-            }
+        for(int tt = 0; tt < n_task; ++tt)
+        {
+            // Calculate the residual; val_ptr holds only n_samp feature values,
+            // so strip the property offset that start has accumulated
+            std::transform(
+                val_ptr + (start - pp * n_samp),
+                val_ptr + (start - pp * n_samp) + d->_optimizer->task_sizes()[tt],
+                d->_prop + start,
+                d->_optimizer->residuals(start),
+                [p, tt, ci, &n_samp](double vp, double prop){
+                    return (
+                        ci / n_samp *
+                        std::log(1 + std::pow(prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]), 2.0) / ci)
+                    );
+                }
+            );
+            start += d->_optimizer->task_sizes()[tt];
+        }
+    }
+
+    // Default to the first property so the gradient indexing below stays in range
+    // even if every per-property residual sum is non-finite
+    int min_prop_ind = 0;
+    double min_res = HUGE_VAL;
+    for(int pp = 0; pp < n_prop; ++pp)
+    {
+        double res = util_funcs::round2(
+            std::accumulate(
+                d->_optimizer->residuals(pp * n_samp),
+                d->_optimizer->residuals((pp + 1) * n_samp),
+                0.0,
+                [](double total, double res){return total + util_funcs::round2(res, 33);}
+            ),
+            33
         );
+        if(std::isfinite(res) && (res < min_res))
+        {
+            min_prop_ind = pp;
+            min_res = res;
+        }
     }
 
     if(grad)
@@ -433,9 +510,9 @@ double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double*
             // Calculate the base of the gradient for each step
             // Contribution to the derivative from (p - (\alpha_task * log(feat_val) + a_task) )^2
             std::transform(
                 val_ptr + start,
                 val_ptr + d->_optimizer->task_sizes()[tt] + start,
-                d->_prop + start,
+                d->_prop + start + min_prop_ind * n_samp,
                 d->_optimizer->feature_gradient((2 * tt + 1)*n_samp + start),
                 [p, tt](double vp, double prop){
                     return prop - (std::log(vp) * p[2*tt] + p[2*tt + 1]);
@@ -454,8 +531,8 @@ double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double*
 
             // \partial s_i/\partial \alpha_task = \partial s_i/\partial a_task * log(f_i)
             std::transform(
                 val_ptr + start,
                 val_ptr + d->_optimizer->task_sizes()[tt] + start,
                 d->_optimizer->feature_gradient((2 * tt + 1) * n_samp + start),
                 d->_optimizer->feature_gradient(2 * tt * n_samp + start),
                 [](double vp, double s){
@@ -467,7 +544,7 @@ double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double*
             std::transform(
                 d->_optimizer->feature_gradient((2 * tt + 1) * n_samp + start),
                 d->_optimizer->feature_gradient((2 * tt + 1) * n_samp + start + d->_optimizer->task_sizes()[tt]),
                 val_ptr + start,
                 d->_optimizer->feature_gradient(2 * n_task * n_samp + start),
                 [p, tt](double s, double vp){return p[2 * tt] * s / vp;}
             );
@@ -502,15 +579,7 @@ double nlopt_wrapper::objective_log_reg(unsigned int n, const double* p, double*
         }
     }
 
-    return util_funcs::round2(
-        std::accumulate(
-            d->_optimizer->residuals(0),
-            d->_optimizer->residuals(n_samp),
-            0.0,
-            [](double total, double res){return total + util_funcs::round2(res, 33);}
-        ),
-        33
-    );
+    return min_res;
 }
 
 
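
objective_log_reg follows the same per-property pattern as objective_reg; the only change in the loss is that the model is linear in the logarithm of the feature value. A matching sketch of its per-sample term (illustrative names):

#include <cmath>

// Illustrative only: per-sample Cauchy loss for the model prop = alpha * log(feat) + a.
double log_cauchy_loss(double feat_val, double prop_val, double alpha, double a, double ci, int n_samp)
{
    const double diff = prop_val - (alpha * std::log(feat_val) + a);
    return ci / n_samp * std::log(1.0 + diff * diff / ci);
}
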
@@ -526,15 +595,36 @@ std::shared_ptr<NLOptimizer> nlopt_wrapper::get_optimizer(
 {
     if(project_type.compare("classification") == 0)
     {
-        return std::make_shared<NLOptimizerClassification>(task_sizes, prop, n_rung, max_param_depth, reset_max_param_depth);
+        return std::make_shared<NLOptimizerClassification>(
+            task_sizes,
+            prop,
+            n_rung,
+            max_param_depth,
+            reset_max_param_depth
+        );
     }
     else if(project_type.compare("regression") == 0)
     {
-        return std::make_shared<NLOptimizerRegression>(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, reset_max_param_depth);
+        return std::make_shared<NLOptimizerRegression>(
+            task_sizes,
+            prop,
+            n_rung,
+            max_param_depth,
+            cauchy_scaling,
+            reset_max_param_depth
+        );
     }
     else if(project_type.compare("log_regression") == 0)
     {
-        return std::make_shared<NLOptimizerLogRegression>(task_sizes, prop, n_rung, max_param_depth, cauchy_scaling, reset_max_param_depth);
+        return std::make_shared<NLOptimizerLogRegression>(
+            task_sizes,
+            prop,
+            n_rung,
+            max_param_depth,
+            cauchy_scaling,
+            reset_max_param_depth
+        );
     }
     else
     {
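
For reference, the factory is called from FeatureSpace::project_generated with the flattened property buffer and five arguments; the cauchy_scaling and reset_max_param_depth parameters are presumably defaulted there. A hedged usage sketch (include path, sizes, and values are placeholders):

#include <memory>
#include <vector>
#include "nl_opt/NLOptWrapper.hpp"  // assumed include path for this repository

void example_usage()
{
    // Two stacked property vectors over 25 samples split into two tasks,
    // maximum rung 2 and parameterization depth 2, mirroring the call in
    // FeatureSpace::project_generated.
    std::vector<int> task_sizes = {10, 15};
    std::vector<double> prop_vec(2 * 25, 0.0);
    std::shared_ptr<NLOptimizer> optimizer =
        nlopt_wrapper::get_optimizer("regression", task_sizes, prop_vec, 2, 2);
}
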
diff --git a/src/nl_opt/NLOptWrapper.hpp b/src/nl_opt/NLOptWrapper.hpp
index 26052ea69fadd05fea354d07c6dbb3f03cc03241..2cb2465386dcd333b7079390b878187dc0a03d63 100644
--- a/src/nl_opt/NLOptWrapper.hpp
+++ b/src/nl_opt/NLOptWrapper.hpp
@@ -31,6 +31,7 @@ protected:
 
     const int _n_samp; //!< total number of samples
     const int _n_rung; //!< Maximum rung of the features
+    const int _n_prop; //!< Number of property vectors to optimize against
     int _max_params; //!< Maximum number of possible parameters
     int _max_param_depth; //!< parameterize features to all depths of the tree
 
@@ -92,6 +93,11 @@ public:
      */
     inline int n_rung() const {return _n_rung;}
 
+    /**
+     * @brief Accessor function to the number of property vectors
+     */
+    inline int n_prop() const {return _n_prop;}
+
     /**
      * @brief Accessor function to the maximum number of possible parameters
      */
@@ -118,7 +124,7 @@ public:
     /**
      * @brief Accessor function to the convex hull optimizers (if the projection is a classification problem)
      */
-    virtual std::shared_ptr<ConvexHull1D> convex_hull() const = 0;
+    virtual std::shared_ptr<ConvexHull1D> convex_hull(int ind) const = 0;
     /**
      * @brief Accessor function to calculate the feature gradient
      */
@@ -141,7 +147,7 @@ public:
 class NLOptimizerClassification: public NLOptimizer
 {
 protected:
-    std::shared_ptr<ConvexHull1D> _convex_hull; //!< Object to perform classification
+    std::vector<std::shared_ptr<ConvexHull1D>> _convex_hull; //!< Objects used to perform classification, one per property vector
 public:
     /**
      * @brief Constructor
@@ -163,7 +169,7 @@ public:
     /**
      * @brief Accessor function to the convex hull optimizers (if the projection is a classification problem)
      */
-    inline std::shared_ptr<ConvexHull1D> convex_hull() const {return _convex_hull;}
+    inline std::shared_ptr<ConvexHull1D> convex_hull(int ind) const {return _convex_hull[ind];}
 
     /**
      * @brief Accessor function to the feature gradient (always nullptr)
@@ -218,7 +224,7 @@ public:
     /**
      * @brief Accessor function to the convex hull optimizers (always nullptr)
      */
-    inline std::shared_ptr<ConvexHull1D> convex_hull() const {return nullptr;}
+    inline std::shared_ptr<ConvexHull1D> convex_hull(int ind) const {return nullptr;}
 
     /**
      * @brief Accessor function to the feature gradient