Commit da41d67d authored by Thomas Purcell

Bug Fixes:

prepare_project is now called whenever projection_prop is set
Wrong dgemm_ settings for LossFunctionPearsonRMSE
parent b55cb2c7
@@ -70,6 +70,7 @@ LossFunctionConvexHull::LossFunctionConvexHull(
std::copy_n(prop_test.begin(), prop_test.size(), _prop_test.begin());
setup_lp();
prepare_project();
}
LossFunctionConvexHull::LossFunctionConvexHull(std::shared_ptr<LossFunction> o) :
@@ -78,6 +79,7 @@ LossFunctionConvexHull::LossFunctionConvexHull(std::shared_ptr<LossFunction> o)
_n_class(vector_utils::unique<double>(_prop_train).size())
{
setup_lp();
prepare_project();
}
void LossFunctionConvexHull::set_nfeat(int n_feat)
@@ -209,9 +211,8 @@ void LossFunctionConvexHull::reset_projection_prop(const std::vector<std::vector
std::copy_n(_error_train.data(), _error_train.size(), &_projection_prop[mm * _n_samp]);
}
_n_feat = models.back().size() + 1;
_n_dim = _n_feat;
setup_lp();
set_nfeat(models.back().size() + 1);
prepare_project();
}
double LossFunctionConvexHull::operator()(const std::vector<int>& inds)
...
@@ -17,6 +17,7 @@ LossFunctionPearsonRMSE::LossFunctionPearsonRMSE(
{
set_nfeat(_n_feat);
set_opt_lwork();
prepare_project();
}
LossFunctionPearsonRMSE::LossFunctionPearsonRMSE(std::shared_ptr<LossFunction> o) :
@@ -29,6 +30,7 @@ LossFunctionPearsonRMSE::LossFunctionPearsonRMSE(std::shared_ptr<LossFunction> o
{
set_nfeat(_n_feat);
set_opt_lwork();
prepare_project();
}
void LossFunctionPearsonRMSE::set_nfeat(int n_feat)
@@ -56,6 +58,7 @@ void LossFunctionPearsonRMSE::reset_projection_prop(const std::vector<std::vecto
set_nfeat(models.back().size() + 1);
_n_project_prop = models.size();
prepare_project();
}
void LossFunctionPearsonRMSE::set_opt_lwork()
@@ -118,14 +121,14 @@ double LossFunctionPearsonRMSE::calc_max_pearson(double* feat_val_ptr)
{
double feat_std = util_funcs::stand_dev(feat_val_ptr + start, _task_sizes_train[tt]);
dgemm_(
'N',
'T',
'N',
_n_project_prop,
1,
_task_sizes_train[tt],
1.0 / feat_std / static_cast<double>(_task_sizes_train[tt]),
&_projection_prop[start * _n_project_prop],
_n_project_prop,
_task_sizes_train[tt],
feat_val_ptr + start,
_task_sizes_train[tt],
0.0,
...
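The second fix targets the dgemm_ call in calc_max_pearson shown above; the hunk is rendered here without add/remove markers, but the commit message ("Wrong dgemm_ settings") points at the transpose and leading-dimension arguments of that call. As a reminder of the convention involved, and not of the project's exact call, the sketch below uses CBLAS with made-up sizes standing in for _n_project_prop and _task_sizes_train[tt]: dgemm computes C = alpha * op(A) * op(B) + beta * C, where m, n and k describe op(A) and op(B), while the leading dimensions describe the arrays as they sit in memory, so the transpose flags and the leading dimensions have to be chosen together.

// Minimal CBLAS sketch of the dgemm convention (illustration only; the repository
// calls the Fortran symbol dgemm_ directly and its argument values differ).
// It computes s = (1/k) * P * f, mirroring the shape of the call above:
// P is m x k, with m standing in for _n_project_prop and k for _task_sizes_train[tt].
#include <cblas.h>
#include <cstdio>

int main()
{
    const int m = 2;                      // made-up stand-in for _n_project_prop
    const int k = 3;                      // made-up stand-in for _task_sizes_train[tt]
    const double P[m * k] = {1.0, 2.0,    // column 0 (column-major storage)
                             3.0, 4.0,    // column 1
                             5.0, 6.0};   // column 2
    const double f[k] = {1.0, 1.0, 1.0};  // stand-in for the feature column
    double s[m] = {0.0, 0.0};             // stand-in for the per-property scores

    // No transpose on either operand, so lda is the stored row count m and ldb is k.
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                m, 1, k,
                1.0 / static_cast<double>(k),
                P, m,
                f, k,
                0.0,
                s, m);

    std::printf("%f %f\n", s[0], s[1]);   // expected: 3.000000 4.000000
    return 0;
}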
@@ -92,7 +92,6 @@ void project_funcs::project_loss(std::shared_ptr<LossFunction> loss, const std::
#pragma omp parallel
{
std::shared_ptr<LossFunction> loss_copy = loss_function_util::copy(loss);
loss_copy->prepare_project();
int start = (
omp_get_thread_num() * (feats.size() / omp_get_num_threads()) +
std::min(omp_get_thread_num(), static_cast<int>(feats.size()) % omp_get_num_threads())
@@ -116,7 +115,6 @@ void project_funcs::project_loss(std::shared_ptr<LossFunction> loss, const std::
void project_funcs::project_loss_no_omp(std::shared_ptr<LossFunction> loss, const std::vector<node_ptr>& feats, double* scores)
{
loss->prepare_project();
std::fill_n(scores, feats.size(), std::numeric_limits<double>::max());
std::transform(
feats.begin(),
...
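The two project_funcs hunks remove the prepare_project() calls from the parallel loop in project_loss and from project_loss_no_omp; this is the other half of the first fix. Preparation now happens wherever _projection_prop is set (the constructors and reset_projection_prop above), so the projection drivers can assume every loss object, including the per-thread copies produced by loss_function_util::copy, is already prepared. A self-contained toy sketch of that invariant, with made-up names rather than the real classes in the diff:

// Toy illustration of the invariant this commit establishes
// (ToyLoss and its members are invented for illustration, not code from the diff):
// whatever sets the projection property also prepares the projection buffers,
// so the projection driver never needs to call prepare itself.
#include <cassert>
#include <utility>
#include <vector>

class ToyLoss
{
public:
    explicit ToyLoss(std::vector<double> prop) { reset_projection_prop(std::move(prop)); }

    void reset_projection_prop(std::vector<double> prop)
    {
        _projection_prop = std::move(prop);
        prepare_project();  // keep the buffer consistent with the new property
    }

    void prepare_project() { _buffer.assign(_projection_prop.begin(), _projection_prop.end()); }

    double project(double feat) const
    {
        assert(_buffer.size() == _projection_prop.size());  // holds on every path now
        double score = 0.0;
        for (double p : _buffer)
        {
            score += p * feat;
        }
        return score;
    }

private:
    std::vector<double> _projection_prop;
    std::vector<double> _buffer;
};

int main()
{
    ToyLoss loss({1.0, 2.0, 3.0});           // prepared by the constructor
    double s1 = loss.project(0.5);           // 3.0
    loss.reset_projection_prop({4.0, 5.0});  // prepared again by the reset
    double s2 = loss.project(0.5);           // 4.5
    return (s1 == 3.0 && s2 == 4.5) ? 0 : 1;
}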