From 7a55173001827c68a9cc7babc689c7c17844e7d7 Mon Sep 17 00:00:00 2001
From: Thomas Purcell <purcell@fhi-berlin.mpg.de>
Date: Wed, 23 Sep 2020 11:19:45 +0200
Subject: [PATCH] Update SVM to use libsvm instead of Shark ML

Shark ML misbehaves in some environments, so switch the SVM training
in ModelClassifier over to the bundled libsvm, accessed through the
SVMWrapper class. Along the way: silence libsvm's console output,
split SHARK_CMAKE_ARGS into separate list(APPEND ...) entries, default
USE_PYTHON to ON, drop leftover debug prints from main.cpp, and update
the python SISSOClassifier constructors to match.
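
The new per-task flow in ModelClassifier follows the SVMWrapper
interface visible in this diff. A simplified sketch of the call
sequence (an illustration, not a verbatim excerpt; the exact
signatures live in classification/SVMWrapper.hpp):

    // one wrapper per task: n_class classes, _n_dim features, the
    // task's sample count, and a pointer to the task's labels
    SVMWrapper svm(n_class, _n_dim, _task_sizes_train[tt], &_prop_train[start]);

    // train on one raw feature-value pointer per dimension
    svm.train(node_val_ptrs);

    // linear model: one weight vector per decision boundary, with
    // the matching bias term taken from svm.intercept()
    auto weights = svm.coefs();

    // class estimates for the training and test samples
    auto train_est = svm.y_estimate();
    auto test_est = svm.predict(_task_sizes_test[tt], node_test_val_ptrs);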
---
 CMakeLists.txt                                |  13 +-
 .../Model/ModelClassifier.cpp                 | 309 ++++++++++--------
 .../Model/ModelClassifier.hpp                 |  10 +-
 src/external/libsvm/svm.cpp                   |  46 +--
 src/main.cpp                                  |   2 -
 .../descriptor_identifier/SISSOClassifier.cpp |  22 +-
 6 files changed, 224 insertions(+), 178 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0ca95102..277813c9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -21,7 +21,7 @@ set(CMAKE_CXX_EXTENSIONS OFF)
 
 # compiler options
 option(EXTERNAL_BOOST "Use an external boost library" OFF)
-option(USE_PYTHON "Whether to compile with python binding support" OFF)
+option(USE_PYTHON "Whether to compile with python binding support" ON)
 
 if(USE_PYTHON)
     message(STATUS "USE PYTHON True")
@@ -277,7 +277,16 @@ set(SHARK_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/external/shark/build/")
 set(SHARK_INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/external/shark/bin/")
 set(SHARK_INCLUDE_DIRS "${SHARK_INSTALL_DIR}/include")
 set(SHARK_LIBRARY_DIRS "${SHARK_INSTALL_DIR}/lib")
-set(SHARK_CMAKE_ARGS "CXX=${CMAKE_CXX_COMPILER};-DBUILD_EXAMPLES=OFF;-DBUILD_TESTING=OFF;-DBUILD_SHARED_LIBS=ON;-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER};-DCMAKE_INSTALL_PREFIX=${SHARK_INSTALL_DIR};-DCBLAS_LIBRARY_PATH=${LAPACK_LIBRARY_DIR};-DBOOST_ROOT=${Boost_INSTALL_DIR};-DCMAKE_INSTALL_LIBDIR=${SHARK_LIBRARY_DIRS}")
+set(SHARK_CMAKE_ARGS "CXX=${CMAKE_CXX_COMPILER}")
+list(APPEND SHARK_CMAKE_ARGS "-DBUILD_EXAMPLES=OFF")
+list(APPEND SHARK_CMAKE_ARGS "-DBUILD_TESTING=OFF")
+list(APPEND SHARK_CMAKE_ARGS "-DBUILD_SHARED_LIBS=ON")
+list(APPEND SHARK_CMAKE_ARGS "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}")
+list(APPEND SHARK_CMAKE_ARGS "-DCMAKE_INSTALL_PREFIX=${SHARK_INSTALL_DIR}")
+list(APPEND SHARK_CMAKE_ARGS "-DCBLAS_LIBRARY_PATH=${LAPACK_LIBRARY_DIR}")
+list(APPEND SHARK_CMAKE_ARGS "-DBOOST_ROOT=${Boost_INSTALL_DIR}")
+list(APPEND SHARK_CMAKE_ARGS "-DCMAKE_INSTALL_LIBDIR=${SHARK_LIBRARY_DIRS}")
+
 ExternalProject_Add(
   external_shark
   PREFIX "external/shark"
diff --git a/src/descriptor_identifier/Model/ModelClassifier.cpp b/src/descriptor_identifier/Model/ModelClassifier.cpp
index 5056ebf5..b3a3c8f7 100644
--- a/src/descriptor_identifier/Model/ModelClassifier.cpp
+++ b/src/descriptor_identifier/Model/ModelClassifier.cpp
@@ -135,152 +135,197 @@ ModelClassifier::ModelClassifier(Unit prop_unit, std::vector<double> prop_train,
     std::transform(_prop_train_est.begin(), _prop_train_est.end(), _prop_train.begin(), _train_error.begin(), [](double est, double real){return (std::abs(est - real) < 1e-10) ? -1 : real;});
     std::transform(_prop_test_est.begin(), _prop_test_est.end(), _prop_test.begin(), _test_error.begin(), [](double est, double real){return (std::abs(est - real) < 1e-10) ? -1 : real;});
 
-    // Setup ML Shark data structures for SVM training
-    std::vector<shark::LabeledData<shark::RealVector, unsigned int>> training(_task_sizes_train.size());
-    std::vector<shark::LabeledData<shark::RealVector, unsigned int>> test(_task_sizes_test.size());
-    std::vector<unsigned int> prop_to_copy(_prop_train.size());
-    int n_els = 0;
-
-    // Populate data structures for SVM and convex hull overlap
+    // Train the SVM for each task with libsvm (via SVMWrapper) and store its predictions
+    int start = 0;
+    int start_test = 0;
     for(int tt = 0; tt < _task_sizes_train.size(); ++tt)
     {
-        shark::Data<shark::RealVector> data(_task_sizes_train[tt], shark::RealVector(_n_dim));
-        shark::Data<unsigned int> labels(_task_sizes_train[tt], 0);
-
-        for(int ii = 0; ii < _task_sizes_train[tt]; ++ii)
-            prop_to_copy[ii] = static_cast<unsigned int>(_prop_train[ii + n_els]);
+        SVMWrapper svm(n_class, _n_dim, _task_sizes_train[tt], &_prop_train[start]);
 
-        int n_copied = 0;
-        for(auto& batch : labels.batches())
-        {
-            std::copy_n(prop_to_copy.begin() + n_copied, shark::batchSize(batch), &batch(0));
-            n_els += shark::batchSize(batch);
-            n_copied += shark::batchSize(batch);
-        }
+        std::vector<double*> node_val_ptrs(_n_dim);
+        std::vector<double*> node_test_val_ptrs(_n_dim);
 
-        n_copied = 0;
-        for(auto& batch : data.batches())
+        for(int dd = 0; dd < _n_dim; ++dd)
         {
-            for(int ff = 0; ff < _n_dim; ++ff)
-                dcopy_(shark::batchSize(batch), _feats[ff]->svm_value_ptr() + n_copied, 1, &batch(0, 0) + ff, _n_dim);
-
-            n_copied += shark::batchSize(batch);
+            node_val_ptrs[dd] = _feats[dd]->value_ptr() + start;
+            node_test_val_ptrs[dd] = _feats[dd]->test_value_ptr() + start_test;
         }
-        training[tt] = shark::LabeledData<shark::RealVector, unsigned int>(data, labels);
-    }
-
-    prop_to_copy.resize(_prop_test.size());
-    n_els = 0;
-
-    for(int tt = 0; tt < _task_sizes_test.size(); ++tt)
-    {
-        if(_task_sizes_test[tt] == 0)
-            continue;
-
-        shark::Data<shark::RealVector> data(_task_sizes_test[tt], shark::RealVector(_n_dim));
-        shark::Data<unsigned int> labels(_task_sizes_test[tt], 0);
 
-        for(int ii = 0; ii < _task_sizes_test[tt]; ++ii)
-            prop_to_copy[ii] = static_cast<unsigned int>(_prop_test[ii + n_els]);
+        svm.train(node_val_ptrs);
 
-        int n_copied = 0;
-        for(auto& batch : labels.batches())
+        int n_coef = 0;
+        for(auto& coef : svm.coefs())
         {
-            std::copy_n(prop_to_copy.begin() + n_copied, shark::batchSize(batch), &batch(0));
-            n_els += shark::batchSize(batch);
-            n_copied += shark::batchSize(batch);
+            _coefs.push_back(coef);
+            _coefs.back().push_back(svm.intercept()[n_coef]);
+            // for(int dd = 0; dd < _n_dim; ++dd)
+            // {
+            //     _coefs.back()[dd] = _feats[dd]->remap_w_svm(_coefs.back()[dd]);
+            //     _coefs.back()[_n_dim] -= _feats[dd]->remap_b_svm(_coefs.back()[dd]);
+            // }
+            ++n_coef;
         }
 
-        n_copied = 0;
-        for(auto& batch : data.batches())
-        {
-            for(int ff = 0; ff < _n_dim; ++ff)
-                dcopy_(shark::batchSize(batch), _feats[ff]->svm_test_value_ptr() + n_copied, 1, &batch(0, 0) + ff, _n_dim);
+        std::copy_n(svm.y_estimate().begin(), _task_sizes_train[tt], _prop_train_est.begin() + start);
+        std::copy_n(svm.predict(_task_sizes_test[tt], node_test_val_ptrs).begin(), _task_sizes_test[tt], _prop_test_est.begin() + start_test);
 
-            n_copied += shark::batchSize(batch);
-        }
-        test[tt] = shark::LabeledData<shark::RealVector, unsigned int>(data, labels);
+        start += _task_sizes_train[tt];
+        start_test += _task_sizes_test[tt];
     }
 
-    // Perform SVM training
-
-    // Process SVM model
-    std::shared_ptr<shark::AbstractKernelFunction<shark::RealVector>> _kernel_ptr = std::make_shared<shark::LinearKernel<shark::RealVector>>();
-    shark::CSvmTrainer<shark::RealVector> trainer(_kernel_ptr.get(), 100.0, !_fix_intercept);
-    shark::KernelClassifier<shark::RealVector> model;
-    shark::ZeroOneLoss<unsigned int> loss;
-    shark::Data<unsigned int> output;
-
-    int ii_train = 0;
-    int ii_test = 0;
-
-    for(int tt = 0; tt < _task_sizes_train.size(); ++tt)
-    {
-        trainer.stoppingCondition().maxSeconds = 0.1;
-        trainer.train(model, training[tt]);
-        if(_fix_intercept)
-        {
-            for(int cc = 0; cc < n_class - (n_class == 2); ++cc)
-            {
-                _coefs.push_back(std::vector<double>(_n_dim, 0.0));
-                int sv = 0;
-                for(auto el : model.decisionFunction().basis().elements())
-                {
-                    daxpy_(_n_dim, model.parameterVector()(sv), &el(0), 1, _coefs.back().data(), 1);
-                    ++sv;
-                }
-                for(int dd = 0; dd < _n_dim; ++dd)
-                {
-                    _coefs.back()[dd] = _feats[dd]->remap_w_svm(_coefs.back()[dd]);
-                }
-            }
-        }
-        else
-        {
-            for(int cc = 0; cc < n_class - (n_class == 2); ++cc)
-            {
-                _coefs.push_back(std::vector<double>(_n_dim + 1, 0.0));
-                int sv = 0;
-                for(auto el : model.decisionFunction().basis().elements())
-                {
-                    daxpy_(_n_dim, model.parameterVector()(sv), &el(0), 1, _coefs.back().data(), 1);
-                    ++sv;
-                }
-                _coefs.back()[_n_dim] = model.parameterVector()(sv);
-
-                for(int dd = 0; dd < _n_dim; ++dd)
-                {
-                    _coefs.back()[dd] = _feats[dd]->remap_w_svm(_coefs.back()[dd]);
-                    _coefs.back()[_n_dim] -= _feats[dd]->remap_b_svm(_coefs.back()[dd]);
-
-                }
-            }
-        }
-        output = model(training[tt].inputs());
-        for(auto element: output.elements())
-        {
-            _prop_train_est[ii_train] = static_cast<double>(element);
-            ++ii_train;
-        }
-
-        std::vector<bool>train_misclassified(_n_samp_train);
-        std::transform(_prop_train_est.begin(), _prop_train_est.end(), _prop_train.begin(), train_misclassified.begin(), [](double p1, double p2){return p1 != p2;});
-        _train_n_svm_misclassified = std::accumulate(train_misclassified.begin(), train_misclassified.end(), 0);
-
-        if(_task_sizes_test[tt] == 0)
-            continue;
-
-        output = model(test[tt].inputs());
-        for(auto element: output.elements())
-        {
-            _prop_test_est[ii_test] = static_cast<double>(element);
-            ++ii_test;
-        }
-        std::vector<bool>test_misclassified(_n_samp_train);
-        std::transform(_prop_test_est.begin(), _prop_test_est.end(), _prop_train.begin(), test_misclassified.begin(), [](double p1, double p2){return p1 != p2;});
-        _test_n_svm_misclassified = std::accumulate(test_misclassified.begin(), test_misclassified.end(), 0);
-
-    }
+    std::vector<bool> train_misclassified(_n_samp_train);
+    std::transform(_prop_train_est.begin(), _prop_train_est.end(), _prop_train.begin(), train_misclassified.begin(), [](double p1, double p2){return p1 != p2;});
+    _train_n_svm_misclassified = std::accumulate(train_misclassified.begin(), train_misclassified.end(), 0);
+
+    std::vector<bool> test_misclassified(_n_samp_test);
+    std::transform(_prop_test_est.begin(), _prop_test_est.end(), _prop_test.begin(), test_misclassified.begin(), [](double p1, double p2){return p1 != p2;});
+    _test_n_svm_misclassified = std::accumulate(test_misclassified.begin(), test_misclassified.end(), 0);
+
+    // // Setup ML Shark data structures for SVM training
+    // std::vector<shark::LabeledData<shark::RealVector, unsigned int>> training(_task_sizes_train.size());
+    // std::vector<shark::LabeledData<shark::RealVector, unsigned int>> test(_task_sizes_test.size());
+    // std::vector<unsigned int> prop_to_copy(_prop_train.size());
+    // int n_els = 0;
+
+    // // Populate data structures for SVM and convex hull overlap
+    // for(int tt = 0; tt < _task_sizes_train.size(); ++tt)
+    // {
+    //     shark::Data<shark::RealVector> data(_task_sizes_train[tt], shark::RealVector(_n_dim));
+    //     shark::Data<unsigned int> labels(_task_sizes_train[tt], 0);
+
+    //     for(int ii = 0; ii < _task_sizes_train[tt]; ++ii)
+    //         prop_to_copy[ii] = static_cast<unsigned int>(_prop_train[ii + n_els]);
+
+    //     int n_copied = 0;
+    //     for(auto& batch : labels.batches())
+    //     {
+    //         std::copy_n(prop_to_copy.begin() + n_copied, shark::batchSize(batch), &batch(0));
+    //         n_els += shark::batchSize(batch);
+    //         n_copied += shark::batchSize(batch);
+    //     }
+
+    //     n_copied = 0;
+    //     for(auto& batch : data.batches())
+    //     {
+    //         for(int ff = 0; ff < _n_dim; ++ff)
+    //             dcopy_(shark::batchSize(batch), _feats[ff]->svm_value_ptr() + n_copied, 1, &batch(0, 0) + ff, _n_dim);
+
+    //         n_copied += shark::batchSize(batch);
+    //     }
+    //     training[tt] = shark::LabeledData<shark::RealVector, unsigned int>(data, labels);
+    // }
+
+    // prop_to_copy.resize(_prop_test.size());
+    // n_els = 0;
+
+    // for(int tt = 0; tt < _task_sizes_test.size(); ++tt)
+    // {
+    //     if(_task_sizes_test[tt] == 0)
+    //         continue;
+
+    //     shark::Data<shark::RealVector> data(_task_sizes_test[tt], shark::RealVector(_n_dim));
+    //     shark::Data<unsigned int> labels(_task_sizes_test[tt], 0);
+
+    //     for(int ii = 0; ii < _task_sizes_test[tt]; ++ii)
+    //         prop_to_copy[ii] = static_cast<unsigned int>(_prop_test[ii + n_els]);
+
+    //     int n_copied = 0;
+    //     for(auto& batch : labels.batches())
+    //     {
+    //         std::copy_n(prop_to_copy.begin() + n_copied, shark::batchSize(batch), &batch(0));
+    //         n_els += shark::batchSize(batch);
+    //         n_copied += shark::batchSize(batch);
+    //     }
+
+    //     n_copied = 0;
+    //     for(auto& batch : data.batches())
+    //     {
+    //         for(int ff = 0; ff < _n_dim; ++ff)
+    //             dcopy_(shark::batchSize(batch), _feats[ff]->svm_test_value_ptr() + n_copied, 1, &batch(0, 0) + ff, _n_dim);
+
+    //         n_copied += shark::batchSize(batch);
+    //     }
+    //     test[tt] = shark::LabeledData<shark::RealVector, unsigned int>(data, labels);
+    // }
+
+    // // Perform SVM training
+
+    // // Process SVM model
+    // std::shared_ptr<shark::AbstractKernelFunction<shark::RealVector>> _kernel_ptr = std::make_shared<shark::LinearKernel<shark::RealVector>>();
+    // shark::CSvmTrainer<shark::RealVector> trainer(_kernel_ptr.get(), 100.0, !_fix_intercept);
+    // shark::KernelClassifier<shark::RealVector> model;
+    // shark::ZeroOneLoss<unsigned int> loss;
+    // shark::Data<unsigned int> output;
+
+    // int ii_train = 0;
+    // int ii_test = 0;
+
+    // for(int tt = 0; tt < _task_sizes_train.size(); ++tt)
+    // {
+    //     trainer.stoppingCondition().maxSeconds = 0.1;
+    //     trainer.train(model, training[tt]);
+    //     if(_fix_intercept)
+    //     {
+    //         for(int cc = 0; cc < n_class - (n_class == 2); ++cc)
+    //         {
+    //             _coefs.push_back(std::vector<double>(_n_dim, 0.0));
+    //             int sv = 0;
+    //             for(auto el : model.decisionFunction().basis().elements())
+    //             {
+    //                 daxpy_(_n_dim, model.parameterVector()(sv), &el(0), 1, _coefs.back().data(), 1);
+    //                 ++sv;
+    //             }
+    //             for(int dd = 0; dd < _n_dim; ++dd)
+    //             {
+    //                 _coefs.back()[dd] = _feats[dd]->remap_w_svm(_coefs.back()[dd]);
+    //             }
+    //         }
+    //     }
+    //     else
+    //     {
+    //         for(int cc = 0; cc < n_class - (n_class == 2); ++cc)
+    //         {
+    //             _coefs.push_back(std::vector<double>(_n_dim + 1, 0.0));
+    //             int sv = 0;
+    //             for(auto el : model.decisionFunction().basis().elements())
+    //             {
+    //                 daxpy_(_n_dim, model.parameterVector()(sv), &el(0), 1, _coefs.back().data(), 1);
+    //                 ++sv;
+    //             }
+    //             _coefs.back()[_n_dim] = model.parameterVector()(sv);
+
+    //             for(int dd = 0; dd < _n_dim; ++dd)
+    //             {
+    //                 _coefs.back()[dd] = _feats[dd]->remap_w_svm(_coefs.back()[dd]);
+    //                 _coefs.back()[_n_dim] -= _feats[dd]->remap_b_svm(_coefs.back()[dd]);
+
+    //             }
+    //         }
+    //     }
+    //     output = model(training[tt].inputs());
+    //     for(auto element: output.elements())
+    //     {
+    //         _prop_train_est[ii_train] = static_cast<double>(element);
+    //         ++ii_train;
+    //     }
+
+    //     std::vector<bool>train_misclassified(_n_samp_train);
+    //     std::transform(_prop_train_est.begin(), _prop_train_est.end(), _prop_train.begin(), train_misclassified.begin(), [](double p1, double p2){return p1 != p2;});
+    //     _train_n_svm_misclassified = std::accumulate(train_misclassified.begin(), train_misclassified.end(), 0);
+
+    //     if(_task_sizes_test[tt] == 0)
+    //         continue;
+
+    //     output = model(test[tt].inputs());
+    //     for(auto element: output.elements())
+    //     {
+    //         _prop_test_est[ii_test] = static_cast<double>(element);
+    //         ++ii_test;
+    //     }
+    //     std::vector<bool>test_misclassified(_n_samp_train);
+    //     std::transform(_prop_test_est.begin(), _prop_test_est.end(), _prop_train.begin(), test_misclassified.begin(), [](double p1, double p2){return p1 != p2;});
+    //     _test_n_svm_misclassified = std::accumulate(test_misclassified.begin(), test_misclassified.end(), 0);
+    // }
 
 }
 
diff --git a/src/descriptor_identifier/Model/ModelClassifier.hpp b/src/descriptor_identifier/Model/ModelClassifier.hpp
index 50749f78..d2a416b8 100644
--- a/src/descriptor_identifier/Model/ModelClassifier.hpp
+++ b/src/descriptor_identifier/Model/ModelClassifier.hpp
@@ -18,12 +18,14 @@
 #include<fstream>
 #include<iostream>
 
-#include <shark/Algorithms/Trainers/CSvmTrainer.h>
-#include <shark/Models/Kernels/LinearKernel.h>
-#include <shark/Data/Dataset.h>
-#include <shark/ObjectiveFunctions/Loss/ZeroOneLoss.h>
+// #include <shark/Algorithms/Trainers/CSvmTrainer.h>
+// #include <shark/Models/Kernels/LinearKernel.h>
+// #include <shark/Data/Dataset.h>
+// #include <shark/ObjectiveFunctions/Loss/ZeroOneLoss.h>
 
 #include <coin/ClpSimplex.hpp>
+
+#include <classification/SVMWrapper.hpp>
 #include <descriptor_identifier/Model/Model.hpp>
 #include <feature_creation/node/ModelNode.hpp>
 #include <utils/string_utils.hpp>
diff --git a/src/external/libsvm/svm.cpp b/src/external/libsvm/svm.cpp
index fa44818b..e7724d6c 100644
--- a/src/external/libsvm/svm.cpp
+++ b/src/external/libsvm/svm.cpp
@@ -475,8 +475,8 @@ void Solver::reconstruct_gradient()
 		if(is_free(j))
 			nr_free++;
 
-	if(2*nr_free < active_size)
-		info("\nWARNING: using -h 0 may be faster\n");
+	// if(2*nr_free < active_size)
+	// 	info("\nWARNING: using -h 0 may be faster\n");
 
 	if (nr_free*l > 2*active_size*(l-active_size))
 	{
@@ -569,7 +569,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 		{
 			counter = min(l,1000);
 			if(shrinking) do_shrinking();
-			info(".");
+			// info(".");
 		}
 
 		int i,j;
@@ -579,7 +579,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 			reconstruct_gradient();
 			// reset active set size and check
 			active_size = l;
-			info("*");
+			// info("*");
 			if(select_working_set(i,j)!=0)
 				break;
 			else
@@ -735,7 +735,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 			// reconstruct the whole gradient to calculate objective value
 			reconstruct_gradient();
 			active_size = l;
-			info("*");
+			// info("*");
 		}
 		fprintf(stderr,"\nWARNING: reaching max number of iterations\n");
 	}
@@ -771,7 +771,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 	si->upper_bound_p = Cp;
 	si->upper_bound_n = Cn;
 
-	info("\noptimization finished, #iter = %d\n",iter);
+	// info("\noptimization finished, #iter = %d\n",iter);
 
 	delete[] p;
 	delete[] y;
@@ -944,7 +944,7 @@ void Solver::do_shrinking()
 		unshrink = true;
 		reconstruct_gradient();
 		active_size = l;
-		info("*");
+		// info("*");
 	}
 
 	for(i=0;i<active_size;i++)
@@ -1462,8 +1462,8 @@ static void solve_c_svc(
 	for(i=0;i<l;i++)
 		sum_alpha += alpha[i];
 
-	if (Cp==Cn)
-		info("nu = %f\n", sum_alpha/(Cp*prob->l));
+	// if (Cp==Cn)
+	// 	info("nu = %f\n", sum_alpha/(Cp*prob->l));
 
 	for(i=0;i<l;i++)
 		alpha[i] *= y[i];
@@ -1513,7 +1513,7 @@ static void solve_nu_svc(
 		alpha, 1.0, 1.0, param->eps, si,  param->shrinking);
 	double r = si->r;
 
-	info("C = %f\n",1/r);
+	// info("C = %f\n",1/r);
 
 	for(i=0;i<l;i++)
 		alpha[i] *= y[i]/r;
@@ -1590,7 +1590,7 @@ static void solve_epsilon_svr(
 		alpha[i] = alpha2[i] - alpha2[i+l];
 		sum_alpha += fabs(alpha[i]);
 	}
-	info("nu = %f\n",sum_alpha/(param->C*l));
+	// info("nu = %f\n",sum_alpha/(param->C*l));
 
 	delete[] alpha2;
 	delete[] linear_term;
@@ -1625,7 +1625,7 @@ static void solve_nu_svr(
 	s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y,
 		alpha2, C, C, param->eps, si, param->shrinking);
 
-	info("epsilon = %f\n",-si->r);
+	// info("epsilon = %f\n",-si->r);
 
 	for(i=0;i<l;i++)
 		alpha[i] = alpha2[i] - alpha2[i+l];
@@ -1669,7 +1669,7 @@ static decision_function svm_train_one(
 			break;
 	}
 
-	info("obj = %f, rho = %f\n",si.obj,si.rho);
+	// info("obj = %f, rho = %f\n",si.obj,si.rho);
 
 	// output SVs
 
@@ -1693,7 +1693,7 @@ static decision_function svm_train_one(
 		}
 	}
 
-	info("nSV = %d, nBSV = %d\n",nSV,nBSV);
+	// info("nSV = %d, nBSV = %d\n",nSV,nBSV);
 
 	decision_function f;
 	f.alpha = alpha;
@@ -1805,13 +1805,13 @@ static void sigmoid_train(
 
 		if (stepsize < min_step)
 		{
-			info("Line search fails in two-class probability estimates\n");
+			// info("Line search fails in two-class probability estimates\n");
 			break;
 		}
 	}
 
-	if (iter>=max_iter)
-		info("Reaching maximal iterations in two-class probability estimates\n");
+	// if (iter>=max_iter)
+	// 	info("Reaching maximal iterations in two-class probability estimates\n");
 	free(t);
 }
 
@@ -1882,8 +1882,8 @@ static void multiclass_probability(int k, double **r, double *p)
 			}
 		}
 	}
-	if (iter>=max_iter)
-		info("Exceeds max_iter in multiclass_prob\n");
+	// if (iter>=max_iter)
+	// 	info("Exceeds max_iter in multiclass_prob\n");
 	for(t=0;t<k;t++) free(Q[t]);
 	free(Q);
 	free(Qp);
@@ -2003,7 +2003,7 @@ static double svm_svr_probability(
 		else
 			mae+=fabs(ymv[i]);
 	mae /= (prob->l-count);
-	info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",mae);
+	// info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",mae);
 	free(ymv);
 	return mae;
 }
@@ -2150,8 +2150,8 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 
 		// group training data of the same class
 		svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
-		if(nr_class == 1)
-			info("WARNING: training data in only one class. See README for details.\n");
+		// if(nr_class == 1)
+		// 	info("WARNING: training data in only one class. See README for details.\n");
 
 		svm_node **x = Malloc(svm_node *,l);
 		int i;
@@ -2270,7 +2270,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 			nz_count[i] = nSV;
 		}
 
-		info("Total nSV = %d\n",total_sv);
+		// info("Total nSV = %d\n",total_sv);
 
 		model->l = total_sv;
 		model->SV = Malloc(svm_node *,total_sv);
diff --git a/src/main.cpp b/src/main.cpp
index 0a209454..b6b12267 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -73,9 +73,7 @@ int main(int argc, char const *argv[])
             }
         }
     }
-    std::cout << "finialize mpi" << std::endl;
 
     mpi_setup::finalize_mpi_env();
-    std::cout << "out" << std::endl;
     return 0;
 }
diff --git a/src/python/descriptor_identifier/SISSOClassifier.cpp b/src/python/descriptor_identifier/SISSOClassifier.cpp
index 9f4019e7..f4ef0d30 100644
--- a/src/python/descriptor_identifier/SISSOClassifier.cpp
+++ b/src/python/descriptor_identifier/SISSOClassifier.cpp
@@ -22,14 +22,8 @@ SISSOClassifier::SISSOClassifier(
     _lp_column_upper(_n_samp, COIN_DBL_MAX),
     _lp_column_start(_n_samp, 0),
     _lp_row((n_dim + 1) * _n_samp, 0),
-    _training(_task_sizes_train.size()),
-    _int_prop(_prop.size()),
-    _int_prop_test(_prop_test.size()),
-    _int_error(_prop.size()),
-    _kernel_ptr(std::make_shared<shark::LinearKernel<shark::RealVector>>()),
-    _trainer(_kernel_ptr.get(), 100.0, !_fix_intercept),
-    _c(1.0),
-    _width(1e-3),
+    _c(100.0),
+    _width(1.0e-5),
     _n_class(1),
     _lp_n_col(0),
     _lp_n_row(0)
@@ -64,13 +58,11 @@ SISSOClassifier::SISSOClassifier(
     _lp_column_upper(_n_samp, COIN_DBL_MAX),
     _lp_column_start(_n_samp, 0),
     _lp_row((n_dim + 1) * _n_samp, 0),
-    _training(_task_sizes_train.size()),
-    _int_prop(_prop.size()),
-    _int_prop_test(_prop_test.size()),
-    _int_error(_prop.size()),
-    _kernel_ptr(std::make_shared<shark::LinearKernel<shark::RealVector>>()),
-    _trainer(_kernel_ptr.get(), 100.0, !_fix_intercept),
-    _c(1.0)
+    _c(100.0),
+    _width(1.0e-5),
+    _n_class(1),
+    _lp_n_col(0),
+    _lp_n_row(0)
 {
     std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
     std::vector<double> prop_test_vec = python_conv_utils::from_list<double>(prop_test);
-- 
GitLab