diff --git a/src/classification/LPWrapper.cpp b/src/classification/LPWrapper.cpp
index b835850e7af39aee567b18d0d1a34588cd12f107..7b10870c165e0c0b2d53563259dbfb18284c95cd 100644
--- a/src/classification/LPWrapper.cpp
+++ b/src/classification/LPWrapper.cpp
@@ -341,7 +341,6 @@ void LPWrapper::set_n_overlap(const std::vector<double*> val_ptrs, const std::ve
 {
     _n_overlap = 0;
     _n_overlap_test = 0;
-
     for(int cc = 0; cc < _n_class; ++cc)
     {
         _n_col = _n_row_per_class[cc];
diff --git a/src/classification/SVMWrapper.cpp b/src/classification/SVMWrapper.cpp
index 97a0e1a04eef6e353f7ffcb207281832aaf9061e..b70d501733eb6cfd723f8d131776f5796d903360 100644
--- a/src/classification/SVMWrapper.cpp
+++ b/src/classification/SVMWrapper.cpp
@@ -15,7 +15,6 @@ SVMWrapper::SVMWrapper(const int n_class, const int n_dim, const int n_samp, con
     _n_samp(n_samp),
     _n_class(n_class)
 {
-    // std::copy_n(prop, _n_samp, _y.data());
     setup_parameter_obj(_C);
     setup_x_space();
 
@@ -62,7 +61,6 @@ SVMWrapper::SVMWrapper(const double C, const int n_class, const int n_dim, const
     _n_samp(n_samp),
     _n_class(n_class)
 {
-    // std::copy_n(prop, _n_samp, _y.data());
     setup_parameter_obj(_C);
     setup_x_space();
 
diff --git a/src/descriptor_identifier/Model/ModelClassifier.cpp b/src/descriptor_identifier/Model/ModelClassifier.cpp
index 84cef4086501cb448407a24692c3c135d310dcf4..1be29cbbe90dba37aa2a26c452cc458e1f5598bb 100644
--- a/src/descriptor_identifier/Model/ModelClassifier.cpp
+++ b/src/descriptor_identifier/Model/ModelClassifier.cpp
@@ -9,7 +9,9 @@ ModelClassifier::ModelClassifier(
 ) :
     Model(prop_label, prop_unit, loss, feats, leave_out_inds),
     _train_n_convex_overlap(0),
-    _test_n_convex_overlap(0)
+    _test_n_convex_overlap(0),
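+    // class count reported by the loss function (the base LossFunction returns 0 for non-classification losses)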
+    _n_class(loss->n_class())
 {
     _prop_train_est.reserve(_n_samp_train);
     _prop_test_est.reserve(_n_samp_test);
diff --git a/src/loss_function/LossFunction.hpp b/src/loss_function/LossFunction.hpp
index e13668d40c6f8d75c4ca1cf8aede9fed5c42a340..611d3023b5c9e2851b24061b2e9c62e0f538bdea 100644
--- a/src/loss_function/LossFunction.hpp
+++ b/src/loss_function/LossFunction.hpp
@@ -145,6 +145,9 @@ public:
     inline int n_dim() const {return _n_dim;}
 
     virtual inline void set_nfeat(int n_feat){_n_feat = n_feat; _n_dim = n_feat + (!_fix_intercept);}
+
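+    // number of classes in the property; classification losses override this, the base reports 0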
+    virtual inline int n_class() const {return 0;}
 };
 
 #endif
diff --git a/src/loss_function/LossFunctionConvexHull.cpp b/src/loss_function/LossFunctionConvexHull.cpp
index 792f9d12683fcf6ccaa1517f7612a9ebd7c56d76..62940d4a65c45abb841f13d9475fa5a564d1fafe 100644
--- a/src/loss_function/LossFunctionConvexHull.cpp
+++ b/src/loss_function/LossFunctionConvexHull.cpp
@@ -150,13 +150,14 @@ void LossFunctionConvexHull::setup_lp()
             std::vector<int> n_test_samp_per_class(n_samp_per_class.size(), 0);
             inds.resize(_task_sizes_test[tt]);
             std::iota(inds.begin(), inds.end(), task_start_test);
-            util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), &_prop_test[task_start]);
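+            // sort the test indices with the test-set offset; task_start indexes the training property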
+            util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), &_prop_test[task_start_test]);
 
             cls_start = 0;
-            _test_sample_inds_to_sorted_dmat_inds[inds[0]] = task_start;
+            _test_sample_inds_to_sorted_dmat_inds[inds[0]] = task_start_test;
             for(int ii = 1; ii < inds.size(); ++ii)
             {
-                _test_sample_inds_to_sorted_dmat_inds[inds[ii]] = ii + task_start;
+                _test_sample_inds_to_sorted_dmat_inds[inds[ii]] = ii + task_start_test;
                 if(_prop_test[inds[ii]] != _prop_test[inds[ii - 1]])
                 {
                     n_test_samp_per_class[
@@ -200,10 +201,8 @@
 
 void LossFunctionConvexHull::reset_projection_prop(const std::vector<std::vector<model_node_ptr>>& models)
 {
-    _n_feat = models.back().size() + 1;
-    _n_dim =  _n_feat;
     _n_project_prop = models.size();
 
     _projection_prop.resize(_n_samp * _n_project_prop);
     for(int mm = 0; mm < _n_project_prop; ++mm)
     {
@@ -211,6 +210,9 @@ void LossFunctionConvexHull::reset_projection_prop(const std::vector<std::vector
         std::copy_n(_error_train.data(), _error_train.size(), &_projection_prop[mm * _n_samp]);
     }
 
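+    // update the feature count only after _projection_prop has been filled above; setup_lp() then uses the new dimension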
+    _n_feat = models.back().size() + 1;
+    _n_dim =  _n_feat;
     setup_lp();
 }
 
@@ -226,14 +228,11 @@ double LossFunctionConvexHull::operator()(const std::vector<int>& inds)
 
 double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& feats)
 {
-    std::cout << _n_feat << '\t' << _n_dim << std::endl;
     std::vector<std::vector<double>> sorted_values(feats.size(), std::vector<double>(_n_samp, 0.0));
     std::vector<std::vector<double>> sorted_test_values(feats.size(), std::vector<double>(_n_samp_test, 0.0));
 
-    std::cout << "sort" << std::endl;
     for(int ff = 0; ff < _n_dim; ++ff)
     {
-        std::cout << "train" << std::endl;
         double* val_ptr = feats[ff]->value_ptr();
         std::for_each(
             _sample_inds_to_sorted_dmat_inds.begin(),
@@ -241,24 +240,20 @@ double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& fea
             [&sorted_values, ff, val_ptr](auto& iter){sorted_values[ff][iter.second] = val_ptr[iter.first];}
         );
 
-        std::cout << "test" << std::endl;
         val_ptr = feats[ff]->test_value_ptr();
+
         std::for_each(
             _test_sample_inds_to_sorted_dmat_inds.begin(),
             _test_sample_inds_to_sorted_dmat_inds.end(),
             [&sorted_test_values, ff, val_ptr](auto& iter){sorted_test_values[ff][iter.second] = val_ptr[iter.first];}
         );
-        std::cout << "out" << std::endl;
     }
-    std::cout << "out loop" << std::endl;
+
     int start = 0;
     int start_test = 0;
     double n_convex_overlap = 0.0;
-    std::cout << "make error vec train" << std::endl;
     std::vector<double> err_train(_n_samp);
-    std::cout << "make error vec test" << std::endl;
     std::vector<double> err_test(_n_samp_test);
-    std::cout << "n over" << std::endl;
     for(int tt = 0; tt < _n_task; ++tt)
     {
         std::vector<double*> val_ptrs(feats.size());
@@ -267,13 +262,15 @@ double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& fea
             sorted_values.begin(),
             sorted_values.end(),
             val_ptrs.begin(),
-            [start](std::vector<double> sv){return &sv[start];}
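+            // take each vector by reference: &sv[start] into a by-value copy would return a dangling pointer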
+            [start](std::vector<double>& sv){return &sv[start];}
         );
+
         std::transform(
             sorted_test_values.begin(),
             sorted_test_values.end(),
             test_val_ptrs.begin(),
-            [start_test](std::vector<double> sv){return &sv[start_test];}
+            [start_test](std::vector<double>& sv){return &sv[start_test];}
         );
         _lp[tt].set_n_overlap(val_ptrs, test_val_ptrs, &err_train[start], &err_test[start_test]);
         n_convex_overlap += static_cast<double>(_lp[tt].n_overlap());
@@ -282,7 +279,6 @@ double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& fea
         start_test += _task_sizes_test[tt];
     }
 
-    std::cout << "error" << std::endl;
     std::for_each(
         _sample_inds_to_sorted_dmat_inds.begin(),
         _sample_inds_to_sorted_dmat_inds.end(),
@@ -309,7 +305,6 @@ double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& fea
         [](double err, double real){return (err < 1e-10) ? -1 : real;}
     );
 
-    std::cout << "ret" << std::endl;
     return n_convex_overlap;
 }
 
diff --git a/src/loss_function/LossFunctionConvexHull.hpp b/src/loss_function/LossFunctionConvexHull.hpp
index a25431590ee2117be522f3d03ee1a4824c003f98..e43c4a9e21cf1723bf0ef40203a3481bad059cda 100644
--- a/src/loss_function/LossFunctionConvexHull.hpp
+++ b/src/loss_function/LossFunctionConvexHull.hpp
@@ -135,6 +135,9 @@
 
     void set_nfeat(int n_feat);
 
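+    // number of classes in the property used for classification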
+    inline int n_class() const {return _n_class;}
+
 };
 
 #endif
diff --git a/src/loss_function/LossFunctionLogPearsonRMSE.cpp b/src/loss_function/LossFunctionLogPearsonRMSE.cpp
index b3536fcfbfae4d9426bc6404e32d3b287c220585..6a6ac81b6c6b5f78cf4a45c429b1e211f03ae740 100644
--- a/src/loss_function/LossFunctionLogPearsonRMSE.cpp
+++ b/src/loss_function/LossFunctionLogPearsonRMSE.cpp
@@ -46,7 +46,7 @@ void LossFunctionLogPearsonRMSE::set_a(const std::vector<int>& inds, int taskind
     }
 }
 
-void LossFunctionLogPearsonRMSE::set_a(const std::vector<node_ptr>& feats, int taskind, int start)
+void LossFunctionLogPearsonRMSE::set_a(const std::vector<model_node_ptr>& feats, int taskind, int start)
 {
     for(int ff = 0; ff < feats.size(); ++ff)
     {
@@ -81,12 +81,12 @@ void LossFunctionLogPearsonRMSE::set_error(const std::vector<int>& inds, int tas
     daxpy_(_task_sizes_train[taskind], 1.0, &_prop_train[start], 1, &_error_train[start], 1);
 }
 
-void LossFunctionLogPearsonRMSE::set_error(const std::vector<node_ptr>& feats, int taskind, int start)
+void LossFunctionLogPearsonRMSE::set_error(const std::vector<model_node_ptr>& feats, int taskind, int start)
 {
     std::fill_n(
         _error_train.begin() + start,
         _task_sizes_train[taskind],
-        -1.0 * (!_fix_intercept) * _coefs[(taskind + 1) * _n_feat - 1]
+        -1.0 * (!_fix_intercept) * _coefs[(taskind + 1) * _n_dim - 1]
     );
 
     for(int ff = 0; ff < feats.size(); ++ff)
@@ -102,7 +102,7 @@ void LossFunctionLogPearsonRMSE::set_error(const std::vector<node_ptr>& feats, i
     daxpy_(_task_sizes_train[taskind], 1.0, &_prop_train[start], 1, &_error_train[start], 1);
 }
 
-void LossFunctionLogPearsonRMSE::set_test_error(const std::vector<node_ptr>& feats, int taskind, int start)
+void LossFunctionLogPearsonRMSE::set_test_error(const std::vector<model_node_ptr>& feats, int taskind, int start)
 {
     std::fill_n(
         _error_test.begin() + start,
diff --git a/src/loss_function/LossFunctionLogPearsonRMSE.hpp b/src/loss_function/LossFunctionLogPearsonRMSE.hpp
index 74f3eadc79ae5e2124a43c7fa05a2b53ca2baaa3..ef0b0d58c6e76f156fe53a7daf65adfbe2e87d98 100644
--- a/src/loss_function/LossFunctionLogPearsonRMSE.hpp
+++ b/src/loss_function/LossFunctionLogPearsonRMSE.hpp
@@ -83,7 +83,7 @@ public:
      *
      * @param feats The features used to evaluate the loss function
      */
-    void set_a(const std::vector<node_ptr>& feats, int taskind, int start);
+    void set_a(const std::vector<model_node_ptr>& feats, int taskind, int start);
 
     /**
      * @brief Set the error and return the RMSE
@@ -99,7 +99,7 @@ public:
      * @param feats The features used to evaluate the loss function for the test set
      * @return The test RMSE of the feature
      */
-    void set_test_error(const std::vector<node_ptr>& feats, int taskind, int start);
+    void set_test_error(const std::vector<model_node_ptr>& feats, int taskind, int start);
 
     /**
      * @brief Set the error and return the RMSE
@@ -107,7 +107,7 @@ public:
      * @param feats The features used to evaluate the loss function
      * @return The RMSE of the model
      */
-    void set_error(const std::vector<node_ptr>& feats, int taskind, int start);
+    void set_error(const std::vector<model_node_ptr>& feats, int taskind, int start);
 
     inline LOSS_TYPE type() const {return LOSS_TYPE::LOG_PEARSON_RMSE;}
 };
diff --git a/src/loss_function/LossFunctionPearsonRMSE.cpp b/src/loss_function/LossFunctionPearsonRMSE.cpp
index 2703e631954bf0dc8b33266b21f4f39c1401ae45..f3cc7bd466e8c826162b7c0e0d81b2210d0a6d15 100644
--- a/src/loss_function/LossFunctionPearsonRMSE.cpp
+++ b/src/loss_function/LossFunctionPearsonRMSE.cpp
@@ -14,6 +14,7 @@ LossFunctionPearsonRMSE::LossFunctionPearsonRMSE(
     _coefs(_n_task * _n_dim, 0.0),
     _lwork(0)
 {
+    set_nfeat(_n_feat);
     set_opt_lwork();
 }
 
@@ -25,6 +26,7 @@ LossFunctionPearsonRMSE::LossFunctionPearsonRMSE(std::shared_ptr<LossFunction> o
     _coefs(_n_task * _n_dim, 0.0),
     _lwork(0)
 {
+    set_nfeat(_n_feat);
     set_opt_lwork();
 }
 
@@ -43,8 +45,7 @@ void LossFunctionPearsonRMSE::set_nfeat(int n_feat)
 
 void LossFunctionPearsonRMSE::reset_projection_prop(const std::vector<std::vector<model_node_ptr>>& models)
 {
-    set_nfeat(models.back().size() + 1);
     _n_project_prop = models.size();
 
     _projection_prop.resize(_n_samp * _n_project_prop);
     for(int mm = 0; mm < _n_project_prop; ++mm)
@@ -53,6 +54,8 @@
         std::copy_n(_error_train.data(), _error_train.size(), &_projection_prop[mm * _n_samp]);
     }
 
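+    // update the feature count only after _projection_prop has been filled above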
+    set_nfeat(models.back().size() + 1);
 }
 
 void LossFunctionPearsonRMSE::set_opt_lwork()
 void LossFunctionPearsonRMSE::set_opt_lwork()
@@ -190,7 +193,6 @@ double LossFunctionPearsonRMSE::operator()(const std::vector<model_node_ptr>& fe
     int start = 0;
     int tt = 0;
     int info = 0;
-
     while((tt < _n_task) && (info == 0))
     {
         set_a(feats, tt, start);
@@ -258,7 +260,8 @@ void LossFunctionPearsonRMSE::set_a(const std::vector<model_node_ptr>& feats, in
     {
         std::copy_n(
             feats[ff]->value_ptr() + start,
-            _task_sizes_train[start],
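+            // _task_sizes_train is indexed by task, not by the sample offset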
+            _task_sizes_train[taskind],
             &_a[ff * _task_sizes_train[taskind] + start * _n_dim]
         );
     }
@@ -311,7 +314,8 @@ void LossFunctionPearsonRMSE::set_error(const std::vector<model_node_ptr>& feats
     std::fill_n(
         _error_train.begin() + start,
         _task_sizes_train[taskind],
-        -1 * (!_fix_intercept) * _coefs[(taskind + 1) * _n_feat - 1]
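+        // the intercept is the last of the _n_dim coefficients for this task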
+        -1 * (!_fix_intercept) * _coefs[(taskind + 1) * _n_dim - 1]
     );
 
     for(int ff = 0; ff < feats.size(); ++ff)