Commit 4bf89af9 authored by Thomas Purcell

Bug Fix Classification

Multi-task classification now works when not all classes are defined in all tasks
parent f3fb5698
@@ -32,30 +32,27 @@ ConvexHull1D::ConvexHull1D() :
     _n_class(0)
 {}

-ConvexHull1D::ConvexHull1D(std::vector<int> sizes, const double* prop) :
-    _sorted_value(std::accumulate(sizes.begin(), sizes.end(), 0), 0.0),
+ConvexHull1D::ConvexHull1D(std::vector<int> task_sizes, const double* prop) :
+    _sorted_value(std::accumulate(task_sizes.begin(), task_sizes.end(), 0), 0.0),
     _cls_max(),
     _cls_min(),
-    _sorted_prop_inds(std::accumulate(sizes.begin(), sizes.end(), 0), 0),
+    _sorted_prop_inds(std::accumulate(task_sizes.begin(), task_sizes.end(), 0), 0),
     _cls_start(),
     _cls_sz(),
-    _n_task(sizes.size()),
+    _n_task(task_sizes.size()),
     _n_class(0)
 {
-    initialize_prop(sizes, prop);
+    initialize_prop(task_sizes, prop);
 }

-void ConvexHull1D::initialize_prop(std::vector<int> sizes, const double* prop)
+void ConvexHull1D::initialize_prop(std::vector<int> task_sizes, const double* prop)
 {
     // Set the number of tasks and samples
-    _n_task = sizes.size();
+    _n_task = task_sizes.size();
     _task_scores.resize(_n_task, 0.0);
-    int n_samp = std::accumulate(sizes.begin(), sizes.end(), 0);
+    int n_samp = std::accumulate(task_sizes.begin(), task_sizes.end(), 0);

     // Set number of classes
     std::vector<double> unique_prop_vals;
     for(int pp = 0; pp < n_samp; ++pp)
     {
@@ -74,6 +71,9 @@ void ConvexHull1D::initialize_prop(std::vector<int> sizes, const double* prop)
     _cls_sz.resize(_n_class * _n_task, 0);
     _cls_start.resize(_n_class * _n_task, 0);
+    std::fill_n(_cls_sz.data(), _cls_sz.size(), 0);
+    std::fill_n(_cls_start.data(), _cls_start.size(), 0);

     // Set the values of the cls vectors and sorted inds
     _sorted_value.resize(n_samp, 0.0);
     _sorted_prop_inds.resize(n_samp, 0);
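The two added std::fill_n calls appear to guard re-initialization: std::vector::resize leaves existing elements untouched when the size does not grow, so calling initialize_prop a second time on the same object would otherwise keep stale class counts. A minimal standalone sketch of that behavior (standard library only, unrelated to the SISSO++ classes):

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> counts(4, 7);                // pretend these are stale class sizes
    counts.resize(4, 0);                          // same size: no element is written
    assert(counts[0] == 7);                       // the stale value survives the resize

    std::fill_n(counts.data(), counts.size(), 0); // the explicit reset added above
    assert(counts[0] == 0);
    return 0;
}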
@@ -87,15 +87,15 @@ void ConvexHull1D::initialize_prop(std::vector<int> sizes, const double* prop)
     int start = 0;
-    for(int tt = 0; tt < sizes.size(); ++tt)
+    for(int tt = 0; tt < task_sizes.size(); ++tt)
     {
         util_funcs::argsort<double>(
             _sorted_prop_inds.data() + start,
-            _sorted_prop_inds.data() + start + sizes[tt],
+            _sorted_prop_inds.data() + start + task_sizes[tt],
             prop
         );

-        for(int ind = start; ind < start + sizes[tt]; ++ind)
+        for(int ind = start; ind < start + task_sizes[tt]; ++ind)
         {
             ++_cls_sz[tt * _n_class + cl_ind[prop[ind]]];
         }
@@ -143,7 +143,9 @@ double ConvexHull1D::overlap_1d(double* value, double width)
     for(int c1 = 0; c1 < _n_class; ++c1)
     {
         if(_cls_sz[tt * _n_class + c1] == 0)
+        {
             continue;
+        }

         double min = _cls_min[tt * _n_class + c1];
         double max = _cls_max[tt * _n_class + c1];
......
@@ -51,10 +51,10 @@ public:
     /**
      * @brief Constructor
      *
-     * @param sizes The size of each task
+     * @param task_sizes The size of each task
      * @param prop The pointer to the property vector
      */
-    ConvexHull1D(std::vector<int> sizes, const double* prop);
+    ConvexHull1D(std::vector<int> task_sizes, const double* prop);

     /**
      * @brief Default constructor
@@ -64,10 +64,10 @@ public:
     /**
      * @brief Initialize the projection objects
      *
-     * @param sizes The size of each task
+     * @param task_sizes The size of each task
      * @param prop The pointer to the property vector
      */
-    void initialize_prop(std::vector<int> sizes, const double* prop);
+    void initialize_prop(std::vector<int> task_sizes, const double* prop);

     /**
      * @brief Calculate the projection scores of a set of features to a vector via Pearson correlation
......
@@ -22,27 +22,8 @@
 #include "classification/SVMWrapper.hpp"

 SVMWrapper::SVMWrapper(const int n_class, const int n_dim, const int n_samp, const double* prop) :
-    _model(nullptr),
-    _y(prop, prop + n_samp),
-    _y_est(n_samp),
-    _x_space(n_samp * (n_dim + 1)),
-    _x(n_samp),
-    _coefs(n_class * (n_class - 1) / 2, std::vector<double>(n_dim, 0.0)),
-    _C(1000.0),
-    _intercept(n_class * (n_class - 1) / 2, 0.0),
-    _w_remap(n_dim, 1.0),
-    _b_remap(n_dim, 0.0),
-    _n_dim(n_dim),
-    _n_samp(n_samp),
-    _n_class(n_class)
-{
-    setup_parameter_obj(_C);
-    setup_x_space();
-
-    _prob.l = _n_samp;
-    _prob.y = _y.data();
-    _prob.x = _x.data();
-}
+    SVMWrapper(1000.0, n_class, n_dim, n_samp, prop)
+{}

 SVMWrapper::SVMWrapper(const int n_class, const int n_dim, const std::vector<double> prop) :
     SVMWrapper(n_class, n_dim, prop.size(), prop.data())
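The deleted constructor body is replaced by a C++11 delegating constructor: instead of duplicating the long member-initializer list, the (n_class, n_dim, n_samp, prop) overload now forwards to the C-taking overload with C = 1000.0, just as the std::vector overload below it already forwards. A stripped-down sketch of the pattern (the Solver class here is illustrative, not part of the repository):

#include <iostream>

class Solver
{
    double _C;
    int _n;
public:
    // The most general constructor does all of the work
    Solver(double C, int n) : _C(C), _n(n) {}

    // The convenience overload delegates instead of repeating the init list
    explicit Solver(int n) : Solver(1000.0, n) {}

    double C() const {return _C;}
};

int main()
{
    Solver s(5);
    std::cout << s.C() << std::endl; // prints 1000
    return 0;
}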
@@ -63,6 +44,14 @@ SVMWrapper::SVMWrapper(const double C, const int n_class, const int n_dim, const
     _n_samp(n_samp),
     _n_class(n_class)
 {
+    std::vector<double> unique_prop_vals = vector_utils::unique(_y);
+    std::map<double, double> rep_map;
+    for(int cc = 0; cc < _n_class; ++cc)
+    {
+        _map_prop_vals[static_cast<double>(cc)] = unique_prop_vals[cc];
+        rep_map[unique_prop_vals[cc]] = static_cast<double>(cc);
+    }
+    std::transform(_y.begin(), _y.end(), _y.begin(), [&](double val){return rep_map[val];});
+
     setup_parameter_obj(_C);
     setup_x_space();
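This added block is the heart of the fix on the SVM side: libsvm-style training is simplest with contiguous labels, so the constructor builds a forward map (rep_map, original property value to 0..n_class-1) used to rewrite _y, and the inverse map (_map_prop_vals, index back to original value) used later in train and predict to translate predictions back. A self-contained sketch of the same round trip, with std::set standing in for vector_utils::unique:

#include <algorithm>
#include <iostream>
#include <map>
#include <set>
#include <vector>

int main()
{
    // Class labels as they might arrive in the property vector; not contiguous
    std::vector<double> y = {3.0, 7.0, 3.0, 9.0, 7.0};

    // Sorted unique labels (stand-in for vector_utils::unique)
    std::set<double> unique_vals(y.begin(), y.end());

    std::map<double, double> rep_map;       // original -> contiguous
    std::map<double, double> map_prop_vals; // contiguous -> original
    double cls = 0.0;
    for(double val : unique_vals)
    {
        rep_map[val] = cls;
        map_prop_vals[cls] = val;
        ++cls;
    }

    // Forward: rewrite the labels to 0, 1, 2 before training
    std::transform(y.begin(), y.end(), y.begin(), [&](double v){return rep_map[v];});

    // Inverse: map (here, fake) predictions back to the original labels
    std::transform(y.begin(), y.end(), y.begin(), [&](double v){return map_prop_vals[v];});

    for(double v : y)
    {
        std::cout << v << " "; // 3 7 3 9 7 again
    }
    std::cout << std::endl;
    return 0;
}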
@@ -210,7 +199,10 @@ void SVMWrapper::train(const bool remap_coefs)
     std::transform(_x.begin(), _x.end(), _y_est.begin(), [this](svm_node* sn){return svm_predict(_model, sn);});

     _n_misclassified = 0;
     for(int ss = 0; ss < _n_samp; ++ss)
+    {
         _n_misclassified += _y[ss] != _y_est[ss];
+    }
+    std::transform(_y_est.begin(), _y_est.end(), _y_est.begin(), [this](double val){return _map_prop_vals[val];});
 }

 void SVMWrapper::train(const std::vector<int> inds, const int task, const bool remap_coefs)
@@ -251,6 +243,6 @@ std::vector<double> SVMWrapper::predict(const int n_samp_test, const std::vector
         x_test[ss] = &x_space_test[ss * (val_ptrs.size() + 1)];
     }
     std::transform(x_test.begin(), x_test.end(), y_est_test.begin(), [this](svm_node* sn){return svm_predict(_model, sn);});
+    std::transform(y_est_test.begin(), y_est_test.end(), y_est_test.begin(), [this](double val){return _map_prop_vals[val];});

     return y_est_test;
 }
@@ -22,11 +22,13 @@
 #ifndef LIBSVM_WRAPPER
 #define LIBSVM_WRAPPER

+#include <map>
 #include <algorithm>
 #include <iostream>

 #include "external/libsvm/svm.h"
 #include "feature_creation/node/value_storage/nodes_value_containers.hpp"
+#include "utils/vector_utils.hpp"

 // DocString: cls_svm_wrapper
 /**
@@ -50,6 +52,7 @@ protected:
     std::vector<double> _w_remap; //!< Prefactors to convert the data to/from the preferred SVM range (0 to 1)
     std::vector<double> _b_remap; //!< Prefactors to map the minimum of the features to 0.0
+    std::map<double, double> _map_prop_vals; //!< Map of the property values to the values used for SVM

     const double _C; //!< The C parameter for the SVM calculation
     const int _n_dim; //!< The number of dimensions for the SVM problem
......
@@ -220,8 +220,8 @@ std::string ModelClassifier::write_coefs() const
 {
     std::stringstream coef_head_stream;
     coef_head_stream << "# Decision Boundaries" << std::endl;
-    int n_db = _n_class * (_n_class - 1) / 2;
-    int task_header_w = 1 + static_cast<int>(std::floor(std::log10(n_db))) + std::max(
+    int n_db = _loss->n_class() * (_loss->n_class() - 1) / 2;
+    int task_header_w = 2 + static_cast<int>(std::floor(std::log10(_n_class))) * 3 + std::max(
         6,
         static_cast<int>(std::max_element(_task_names.begin(), _task_names.end(), [](std::string s1, std::string s2){return s1.size() <= s2.size();})->size())
     );
@@ -233,16 +233,19 @@ std::string ModelClassifier::write_coefs() const
     }
     coef_head_stream << " b" << std::endl;

+    int start_coefs = 0;
     for(int tt = 0; tt < _task_names.size(); ++tt)
     {
+        n_db = _loss->n_class(tt) * (_loss->n_class(tt) - 1) / 2;
         for(int db = 0; db < n_db; ++db)
         {
-            coef_head_stream << std::setw(task_header_w) << std::left << "# " + _task_names[tt] + "_" + std::to_string(db) << std::setw(2) << ", ";
-            for(auto& coeff : _coefs[tt * n_db + db])
+            coef_head_stream << std::setw(task_header_w) << std::left << "# " + _task_names[tt] + "_" + _loss->coef_labels()[start_coefs] << std::setw(2) << ", ";
+            for(auto& coeff : _coefs[start_coefs])
             {
                 coef_head_stream << std::setprecision(15) << std::scientific << std::right << std::setw(22) << coeff << std::setw(2) << ", ";
             }
             coef_head_stream << "\n";
+            ++start_coefs;
         }
     }
     return coef_head_stream.str();
......
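Because each task can now have its own class count, the number of one-vs-one decision boundaries n_class * (n_class - 1) / 2 varies per task, so a single running index (start_coefs) replaces the fixed tt * n_db + db stride into _coefs. A small sketch of that bookkeeping, with hypothetical class counts:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical classes per task; task 1 is missing one class
    std::vector<int> n_class_per_task = {3, 2};

    int start_coefs = 0;
    for(std::size_t tt = 0; tt < n_class_per_task.size(); ++tt)
    {
        int n_db = n_class_per_task[tt] * (n_class_per_task[tt] - 1) / 2;
        for(int db = 0; db < n_db; ++db)
        {
            std::cout << "task " << tt << ", boundary " << db
                      << " -> coefficient row " << start_coefs << "\n";
            ++start_coefs;
        }
    }
    // 3 + 1 = 4 rows in total, not n_task * n_db
    return 0;
}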
@@ -45,7 +45,7 @@ void SISSOClassifier::setup_d_mat_transfer()
         std::vector<int> inds(_task_sizes_train[tt]);
         std::iota(inds.begin(), inds.end(), task_start);
-        util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), _loss->prop_pointer() + task_start);
+        util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), _loss->prop_pointer());

         _sample_inds_to_sorted_dmat_inds[inds[0]] = task_start;
         int cls_start = 0;
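The dropped + task_start here was a double offset: inds already holds absolute sample indices (std::iota fills it starting at task_start), so the key array handed to argsort must be the full property array. A small sketch of the idiom, assuming util_funcs::argsort sorts the index range by values[ind] (emulated below with std::sort):

#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    //                        task 0 ---|  |--- task 1
    std::vector<double> prop = {9.0, 8.0, 3.0, 1.0, 2.0};
    int task_start = 2;
    int task_size = 3;

    // Absolute indices 2, 3, 4 for the second task
    std::vector<int> inds(task_size);
    std::iota(inds.begin(), inds.end(), task_start);

    // Sort the indices by prop[ind]: the key array must be indexed
    // absolutely, i.e. pass prop itself, not prop.data() + task_start
    std::sort(inds.begin(), inds.end(), [&](int i, int j){return prop[i] < prop[j];});

    for(int ind : inds)
    {
        std::cout << ind << " "; // 3 4 2
    }
    std::cout << std::endl;
    return 0;
}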
@@ -70,7 +70,7 @@ std::array<double, 2> SISSOClassifier::svm_error(std::vector<SVMWrapper>& svm, c
             dist_error += 1.0 / util_funcs::norm(coefs.data(), feat_inds.size());
         }
     }
-    dist_error /= static_cast<double>(_n_task * svm[0].n_class());
+    dist_error /= static_cast<double>(_n_task * _n_class);

     return {error, dist_error};
 }
@@ -85,8 +85,7 @@ int SISSOClassifier::get_max_error_ind(
         scores,
         [this](int n_overlap, double score){return static_cast<double>(n_overlap * _n_samp * _n_class) + score;}
     );

-    double max_dist = *std::max_element(svm_margin, svm_margin + n_models) + 0.01;
+    double max_dist = std::abs(*std::max_element(svm_margin, svm_margin + n_models, [](double v1, double v2){return std::abs(v1) < std::abs(v2);})) + 0.01;
     std::transform(
         svm_margin,
         svm_margin + n_models,
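The replacement line selects the margin of largest magnitude rather than the largest signed value: std::max_element with a comparator ordering by std::abs returns an iterator to the element of maximum absolute value. A self-contained sketch of the idiom:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
    std::vector<double> margins = {0.5, -2.5, 1.0};

    // A plain max_element would pick 1.0; comparing by |.| picks -2.5
    double max_dist = std::abs(*std::max_element(
        margins.begin(),
        margins.end(),
        [](double v1, double v2){return std::abs(v1) < std::abs(v2);}
    )) + 0.01;

    std::cout << max_dist << std::endl; // 2.51
    return 0;
}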
@@ -121,8 +120,8 @@ void SISSOClassifier::l0_regularization(const int n_dim)
     std::vector<int> inds(n_dim, 0);
     std::vector<int> min_inds(n_get_models * n_dim, -1);
-    std::vector<int> min_n_convex_overlap(n_get_models, std::numeric_limits<int>::max());
-    std::vector<double> min_svm_score(n_get_models, std::numeric_limits<double>::max());
+    std::vector<int> min_n_convex_overlap(n_get_models, _n_samp * _n_class * _n_class);
+    std::vector<double> min_svm_score(n_get_models, _n_samp * _n_class * _n_class);
     std::vector<double> min_svm_margin(n_get_models, -1.0);
@@ -161,17 +160,17 @@ void SISSOClassifier::l0_regularization(const int n_dim)
         int start = 0;
         for(int tt = 0; tt < _n_task; ++tt)
         {
-            svm_vec.push_back(SVMWrapper(_c, _n_class, _n_dim, _task_sizes_train[tt], loss_copy->prop_pointer() + start));
+            svm_vec.push_back(SVMWrapper(_c, _loss->n_class(tt), _n_dim, _task_sizes_train[tt], loss_copy->prop_pointer() + start));
             start += _task_sizes_train[tt];
         }

         unsigned long long int ii_prev = 0;

-        #ifdef OMP45
+#ifdef OMP45
         #pragma omp for schedule(monotonic: dynamic)
-        #else
+#else
         #pragma omp for schedule(dynamic)
-        #endif
+#endif
         for(unsigned long long int ii = _mpi_comm->rank(); ii < n_interactions; ii += static_cast<unsigned long long int>(_mpi_comm->size()))
         {
             util_funcs::iterate(inds, inds.size(), ii - ii_prev);
@@ -301,8 +300,13 @@ void SISSOClassifier::fit()
 void SISSOClassifier::fit()
 {
-    for(int dd = 1; dd <= _n_dim; ++dd)
+    int dd = 1;
+    while(
+        (dd <= _n_dim) &&
+        (*std::max_element(_loss->prop_project_pointer(), _loss->prop_project_pointer() + _loss->n_prop_project() * _n_samp) > 0.0)
+    )
     {
         double start = omp_get_wtime();
         _feat_space->sis(_loss);
@@ -330,6 +334,11 @@ void SISSOClassifier::fit()
                 }
             }
         }
+        ++dd;
+    }
+
+    if(dd <= _n_dim)
+    {
+        std::cerr << "WARNING: All points separated before reaching the requested dimension." << std::endl;
+    }
 }
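The fixed-count for over dimensions becomes a while with an extra termination test: once the projection property holds no positive entries, the training set is already fully separated and further dimensions cannot improve the model, so the loop exits early and a warning is printed. A schematic sketch of the control flow, where fully_separated() is a hypothetical stand-in for the std::max_element test on prop_project_pointer():

#include <iostream>

// Hypothetical: true once every sample is classified correctly
bool fully_separated(int dd) {return dd >= 3;}

void fit(int n_dim)
{
    int dd = 1;
    while((dd <= n_dim) && !fully_separated(dd))
    {
        std::cout << "fitting dimension " << dd << "\n"; // SIS + l0 step
        ++dd;
    }
    if(dd <= n_dim)
    {
        std::cerr << "WARNING: All points separated before reaching the requested dimension." << std::endl;
    }
}

int main()
{
    fit(5); // stops after dimension 2 and prints the warning
    return 0;
}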
@@ -309,6 +309,11 @@ public:
      */
     inline int n_task() const {return _n_task;}

+    /**
+     * @brief Number of properties to project over
+     */
+    inline int n_prop_project(){return _n_project_prop;}
+
     /**
      * @brief Set the number of features and linear model constants to those for the new n_feat
      *
@@ -317,9 +322,26 @@ public:
     virtual inline void set_nfeat(int n_feat){_n_feat = n_feat; _n_dim = n_feat + (!_fix_intercept);}

     /**
-     * @brief The number of classes in the calculation
+     * @brief Number of classes in the property
      */
-    virtual inline int n_class(){return 0;}
+    virtual inline std::vector<int> n_class_per_task() const {return {};}
+
+    /**
+     * @brief The number of classes in the calculation for a given task
+     *
+     * @param task_num The task to get the number of classes for
+     */
+    virtual inline int n_class(int task_num) const {return 0;}
+
+    /**
+     * @brief The maximum number of classes in the calculation
+     */
+    virtual inline int n_class() const {return 0;}
+
+    /**
+     * @brief The labels for each set of coefficients
+     */
+    virtual inline std::vector<std::string> coef_labels() const {return {};}
 };

 #endif
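The base class gains these accessors as virtuals with neutral defaults (zero classes, empty vectors), so regression losses compile unchanged while LossFunctionConvexHull overrides them with real per-task data. A compressed sketch of the dispatch, using simplified stand-in class names:

#include <iostream>
#include <memory>
#include <vector>

class LossBase
{
public:
    virtual ~LossBase() = default;

    // Neutral defaults: a regression loss has no classes
    virtual int n_class(int task_num) const {return 0;}
    virtual std::vector<int> n_class_per_task() const {return {};}
};

class ConvexHullLoss : public LossBase
{
    std::vector<int> _n_class_per_task = {3, 2}; // hypothetical counts

public:
    int n_class(int task_num) const override {return _n_class_per_task[task_num];}
    std::vector<int> n_class_per_task() const override {return _n_class_per_task;}
};

int main()
{
    std::unique_ptr<LossBase> loss = std::make_unique<ConvexHullLoss>();
    std::cout << loss->n_class(1) << std::endl; // 2, via virtual dispatch
    return 0;
}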
@@ -30,6 +30,7 @@ LossFunctionConvexHull::LossFunctionConvexHull(
 ) :
     LossFunction(prop_train, prop_test, task_sizes_train, task_sizes_test, false, n_feat),
     _width(1e-5),
+    _n_class_per_task(task_sizes_train.size(), 0),
     _n_class(0)
 {
     for(auto& pt : prop_test)
@@ -40,7 +41,23 @@ LossFunctionConvexHull::LossFunctionConvexHull(
         }
     }

-    std::vector<double> unique_classes = vector_utils::unique<double>(prop_train);
+    int start = 0;
+    std::vector<double> unique_classes;
+    for(int tt = 0; tt < _task_sizes_train.size(); ++tt)
+    {
+        unique_classes = vector_utils::unique<double>(prop_train.data() + start, _task_sizes_train[tt]);
+        _n_class_per_task[tt] = unique_classes.size();
+        for(int c1 = 0; c1 < unique_classes.size(); ++c1)
+        {
+            for(int c2 = c1 + 1; c2 < unique_classes.size(); ++c2)
+            {
+                _coef_labels.push_back(fmt::format("_{:.1f}_{:.1f}", unique_classes[c1], unique_classes[c2]));
+            }
+        }
+        start += _task_sizes_train[tt];
+    }
+
+    unique_classes = vector_utils::unique<double>(prop_train);
     std::map<double, double> class_map;
     _n_class = unique_classes.size();
     for(int cc = 0; cc < _n_class; ++cc)
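This new loop is where the multi-task fix begins: each task's slice of the property vector is scanned for its own unique class values, so _n_class_per_task[tt] may legitimately differ between tasks, and one coefficient label is generated per class pair actually present in the task. A sketch of the per-task enumeration, with std::set in place of the repository's vector_utils::unique and std::ostringstream in place of fmt::format:

#include <cstddef>
#include <iomanip>
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    // Two tasks; the second task never sees class 2.0
    std::vector<double> prop_train = {0.0, 1.0, 2.0, 0.0, 1.0};
    std::vector<int> task_sizes_train = {3, 2};

    std::vector<int> n_class_per_task(task_sizes_train.size(), 0);
    std::vector<std::string> coef_labels;

    int start = 0;
    for(std::size_t tt = 0; tt < task_sizes_train.size(); ++tt)
    {
        // Unique class values in this task's slice only
        std::set<double> unique_set(
            prop_train.begin() + start,
            prop_train.begin() + start + task_sizes_train[tt]
        );
        std::vector<double> uc(unique_set.begin(), unique_set.end());
        n_class_per_task[tt] = uc.size();

        // One label per one-vs-one class pair present in this task
        for(std::size_t c1 = 0; c1 < uc.size(); ++c1)
        {
            for(std::size_t c2 = c1 + 1; c2 < uc.size(); ++c2)
            {
                std::ostringstream label;
                label << std::fixed << std::setprecision(1)
                      << "_" << uc[c1] << "_" << uc[c2];
                coef_labels.push_back(label.str());
            }
        }
        start += task_sizes_train[tt];
    }

    std::cout << coef_labels.size() << " labels" << std::endl; // 4 (3 pairs + 1 pair)
    return 0;
}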
@@ -95,6 +112,8 @@ LossFunctionConvexHull::LossFunctionConvexHull(
 LossFunctionConvexHull::LossFunctionConvexHull(std::shared_ptr<LossFunction> o) :
     LossFunction(o),
     _width(1e-5),
+    _coef_labels(o->coef_labels()),
+    _n_class_per_task(o->n_class_per_task()),
     _n_class(o->n_class())
 {
     set_nfeat(_n_feat);
@@ -106,7 +125,8 @@ void LossFunctionConvexHull::set_nfeat(int n_feat, bool initialize_sorted_d_mat)
     _n_feat = n_feat;
     _n_dim = n_feat + 1;
     setup_lp(initialize_sorted_d_mat);
-    _coefs.resize(_n_class * (_n_class - 1) / 2 * _n_dim * _task_sizes_train.size(), 0.0);
+    int n_class_combos = std::accumulate(_n_class_per_task.begin(), _n_class_per_task.end(), 0, [](int tot, int nc){return tot + nc * (nc - 1) / 2;});
+    _coefs.resize(n_class_combos * _n_dim, 0.0);
 }

 void LossFunctionConvexHull::prepare_project()
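Because the class counts now differ per task, _coefs can no longer be sized as n_task * n_class * (n_class - 1) / 2 * n_dim; the new code sums each task's own pair count with std::accumulate and a binary lambda. The idiom in isolation, with hypothetical counts:

#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> n_class_per_task = {3, 2, 4}; // hypothetical

    int n_class_combos = std::accumulate(
        n_class_per_task.begin(),
        n_class_per_task.end(),
        0,
        [](int tot, int nc){return tot + nc * (nc - 1) / 2;}
    );

    std::cout << n_class_combos << std::endl; // 3 + 1 + 6 = 10
    return 0;
}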
@@ -157,60 +177,44 @@ void LossFunctionConvexHull::setup_lp(bool initialize_sorted_d_mat)
     int task_start_test = 0;
     _lp = {};

-    std::vector<int> n_samp_per_class;
-    for(int tt = 0; tt < _n_task; ++tt)
+    std::map<double, int> rep_map;
+    std::vector<double> unique_classes = vector_utils::unique(_prop_train);
+    std::sort(unique_classes.begin(), unique_classes.end());
+    for(int uc = 0; uc < unique_classes.size(); ++uc)
     {
-        std::map<double, int> rep_class;
+        rep_map[unique_classes[uc]] = uc;
+    }
+
+    std::vector<int> n_samp_per_class(_n_class * _n_task);
+    for(int tt = 0; tt < _n_task; ++tt)
+    {
         std::vector<int> inds(_task_sizes_train[tt]);
         std::iota(inds.begin(), inds.end(), task_start);
         util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), _prop_train.data());

-        int cls_start = 0;
         _sample_inds_to_sorted_dmat_inds[inds[0]] = task_start;
-        rep_class[_prop_train[inds[0]]] = 0;
-        for(int ii = 1; ii < inds.size(); ++ii)
+        for(int ii = 0; ii < inds.size(); ++ii)
         {
             _sample_inds_to_sorted_dmat_inds[inds[ii]] = ii + task_start;
-            if(_prop_train[inds[ii]] != _prop_train[inds[ii - 1]])
-            {
-                n_samp_per_class.push_back(ii - cls_start);
-                rep_class[_prop_train[inds[ii]]] = n_samp_per_class.size();
-                cls_start = ii;
-            }
-        }
-        n_samp_per_class.push_back(inds.size() - cls_start);
-
-        if(n_samp_per_class.size() != (tt + 1) * _n_class)
-        {
-            throw std::logic_error("A class is not represented in task " + std::to_string(tt) + ".");
+            ++n_samp_per_class[tt * _n_class + rep_map[_prop_train[inds[ii]]]];
         }
+
+        std::vector<int> samp_per_class(_n_class);
+        std::copy_n(n_samp_per_class.begin() + tt * _n_class, _n_class, samp_per_class.begin());
         task_start += _task_sizes_train[tt];

-        std::vector<int> n_samp_test_per_class(n_samp_per_class.size(), 0);
+        std::vector<int> samp_test_per_class(samp_per_class.size(), 0);
         if(_task_sizes_test[tt] > 0)
         {
             inds.resize(_task_sizes_test[tt]);
             std::iota(inds.begin(), inds.end(), task_start_test);
-            util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), &_prop_test[task_start_test]);
+            util_funcs::argsort<double>(inds.data(), inds.data() + inds.size(), _prop_test.data());

-            cls_start = 0;
             _test_sample_inds_to_sorted_dmat_inds[inds[0]] = task_start_test;
-            for(int ii = 1; ii < inds.size(); ++ii)
+            for(int ii = 0; ii < inds.size(); ++ii)
             {
                 _test_sample_inds_to_sorted_dmat_inds[inds[ii]] = ii + task_start_test;
-                if(_prop_test[inds[ii]] != _prop_test[inds[ii - 1]])
-                {
-                    n_samp_test_per_class[
-                        rep_class[_prop_test[inds[ii - 1]]]
-                    ] = ii - cls_start;
-                    cls_start = ii;
-                }
+                ++samp_test_per_class[rep_map[_prop_test[inds[ii]]]];
             }
-            n_samp_test_per_class[rep_class[_prop_test[inds.back()]]] = inds.size() - cls_start;
             task_start_test += _task_sizes_test[tt];
         }

         LPWrapper lp(
@@ -220,8 +224,8 @@
             _n_feat,
             std::accumulate(n_samp_per_class.begin() + tt * _n_class, n_samp_per_class.end(), 0),
             _width,
-            n_samp_test_per_class,
-            std::accumulate(n_samp_test_per_class.begin(), n_samp_test_per_class.end(), 0)
+            samp_test_per_class,
+            std::accumulate(samp_test_per_class.begin(), samp_test_per_class.end(), 0)
         );
         _lp.push_back(lp);
     }
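The rewritten setup_lp drops the run-length counting pass, which assumed every class appears in every task (and threw a std::logic_error otherwise), in favor of a direct histogram: one global map from class value to class index, then a single increment per sample. A class missing from a task now simply keeps a zero count. A condensed sketch of the counting scheme:

#include <iostream>
#include <map>
#include <set>
#include <vector>

int main()
{
    // One task of five samples; a class absent from this task would
    // simply keep a zero count instead of breaking the bookkeeping
    std::vector<double> prop = {0.0, 1.0, 1.0, 2.0, 0.0};
    int n_task = 1;
    int tt = 0;

    // Global class value -> contiguous index (stand-in for rep_map)
    std::set<double> unique_classes(prop.begin(), prop.end());
    std::map<double, int> rep_map;
    int idx = 0;
    for(double uc : unique_classes)
    {
        rep_map[uc] = idx++;
    }

    // One pass over the samples; absent classes just stay at 0
    int n_class = unique_classes.size();
    std::vector<int> n_samp_per_class(n_class * n_task, 0);
    for(double p : prop)
    {
        ++n_samp_per_class[tt * n_class + rep_map[p]];
    }

    for(int c : n_samp_per_class)
    {
        std::cout << c << " "; // 2 2 1
    }
    std::cout << std::endl;
    return 0;
}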
@@ -342,9 +346,10 @@ double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& fea
     // Perform SVM to set estimated prop
     start = 0;
     start_test = 0;
+    int start_coefs = 0;
     for(int tt = 0; tt < _task_sizes_train.size(); ++tt)
     {
-        SVMWrapper svm(_n_class, _n_feat, _task_sizes_train[tt], &_prop_train[start]);
+        SVMWrapper svm(_n_class_per_task[tt], _n_feat, _task_sizes_train[tt], &_prop_train[start]);

         std::vector<double*> node_val_ptrs(_n_feat);
         std::vector<double*> node_test_val_ptrs(_n_feat);
@@ -352,19 +357,19 @@ double LossFunctionConvexHull::operator()(const std::vector<model_node_ptr>& fea
         for(int dd = 0; dd < _n_feat; ++dd)
         {
             node_val_ptrs[dd] = feats[dd]->value_ptr() + start;
-            node_test_val_ptrs[dd] = feats[dd]->test_value_ptr() + start;
+            node_test_val_ptrs[dd] = feats[dd]->test_value_ptr() + start_test;
         }

         svm.train(node_val_ptrs);
         std::vector<std::vector<double>> coefs = svm.coefs();
         for(int cc = 0; cc < coefs.size(); ++cc)
         {
-            std::copy_n(coefs[cc].data(), coefs[cc].size(), &_coefs[tt * _n_class * (_n_class - 1) / 2 * _n_dim + cc * _n_dim]);
-            _coefs[tt * _n_class * (_n_class - 1) / 2 * _n_dim + cc * _n_dim + _n_feat] = svm.intercept()[cc];
+            std::copy_n(coefs[cc].data(), coefs[cc].size(), &_coefs[start_coefs * _n_dim]);
+            _coefs[start_coefs * _n_dim + _n_feat] = svm.intercept()[cc];
+            ++start_coefs;
         }

-        std::copy_n(svm.y_estimate().begin() + start, _task_sizes_train[tt], _prop_train_est.begin() + start);
-        std::copy_n(svm.predict(_task_sizes_test[tt], node_test_val_ptrs).begin() + start_test, _task_sizes_test[tt], _prop_test_est.begin() + start_test);
+        std::copy_n(svm.y_estimate().begin(), _task_sizes_train[tt], _prop_train_est.begin() + start);
+        std::copy_n(svm.predict(_task_sizes_test[tt], node_test_val_ptrs).begin(), _task_sizes_test[tt], _prop_test_est.begin() + start_test);

         start += _task_sizes_train[tt];
         start_test += _task_sizes_test[tt];
......
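Two more offset fixes land in the hunk above: svm.y_estimate() and svm.predict(...) return task-local vectors indexed from zero, so they are now read from begin() and only written at the global offsets start / start_test; likewise the test-value pointers advance by start_test rather than the training offset. A tiny sketch of the local-to-global copy pattern:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> task_sizes = {3, 2};
    std::vector<double> global_est(5, 0.0);

    int start = 0;
    for(std::size_t tt = 0; tt < task_sizes.size(); ++tt)
    {
        // Stand-in for svm.y_estimate(): task-local, indexed from 0
        std::vector<double> local_est(task_sizes[tt], tt + 1.0);

        // Read from the local begin(), write at the global offset
        std::copy_n(local_est.begin(), task_sizes[tt], global_est.begin() + start);
        start += task_sizes[tt];
    }

    for(double v : global_est)
    {
        std::cout << v << " "; // 1 1 1 2 2
    }
    std::cout << std::endl;
    return 0;
}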
@@ -22,6 +22,8 @@
 #ifndef LOSS_FUNCTION_CONVEX_HULL
 #define LOSS_FUNCTION_CONVEX_HULL

+#include <fmt/core.h>
+
 #include "classification/ConvexHull1D.hpp"
 #include "classification/LPWrapper.hpp"
 #include "classification/SVMWrapper.hpp"
@@ -43,9 +45,11 @@ protected:
     std::vector<double> _scores; //!< The scores for each of the projection properties
     std::map<int, int> _sample_inds_to_sorted_dmat_inds; //!< map from input sample inds to the SORTED_D_MATRIX_INDS
     std::map<int, int> _test_sample_inds_to_sorted_dmat_inds; //!< map from input sample inds to the SORTED_D_MATRIX_INDS
+    std::vector<std::string> _coef_labels; //!< The labels for each set of coefficients
+    std::vector<int> _n_class_per_task; //!< Number of classes in the property

+    const double _width; //!< The width used as the tolerance for the LP optimization
     int _n_class; //!< Number of classes in the property
-    const double _width; //!< The width used as the tolerance for the LP optimization

 public:
@@ -159,9 +163,26 @@ public:
     void set_nfeat(int n_feat, bool initialize_sorted_d_mat=false);

     /**
-     * @brief The number of classes in the calculation
+     * @brief The number of classes in the calculation for a given task
+     *
+     * @param task_num The task to get the number of classes for
      */
-    inline int n_class(){return _n_class;}
+    inline int n_class(int task_num) const {return _n_class_per_task[task_num];}
+
+    /**
+     * @brief The maximum number of classes in the calculation
+     */
+    inline int n_class() const {return _n_class;}
+
+    /**
+     * @brief The labels for each set of coefficients
+     */
+    inline std::vector<std::string> coef_labels() const {return _coef_labels;}
+
+    /**
+     * @brief Number of classes in the property
+     */
+    inline std::vector<int> n_class_per_task() const {return _n_class_per_task;}
 };

 #endif
@@ -48,6 +48,28 @@ std::vector<T> unique(const std::vector<T> in_vec)
     return out_vec;
 }

+/**
+ * @brief Return a vector of all unique elements of in_vec