Commit 43e5b950 authored by Thomas Purcell's avatar Thomas Purcell
Browse files

LPWrapper Convex Hull test fixed

Reinitialization removed existing data, leading to the problem.
parent 73ac0fa6
......@@ -800,7 +800,6 @@ void FeatureSpace::generate_and_project(std::shared_ptr<LossFunction> loss, std:
#pragma omp parallel firstprivate(worst_score, worst_score_ind, scores_sel_all)
{
std::shared_ptr<LossFunction> loss_copy = loss_function_util::copy(loss);
std::cout << prop_sorted_d_mat::N_FEATURES << '\t' << prop_sorted_d_mat::SORTED_D_MATRIX.size() << '\t' << node_value_arrs::D_MATRIX.size() << std::endl;
std::vector<node_ptr> phi_sel_private(phi_sel);
std::vector<double> scores_sel_private(scores_sel);
int index_base = _phi.size() + _n_sis_select * (omp_get_thread_num() + _mpi_comm->size());
......
......@@ -228,12 +228,11 @@ void LossFunctionConvexHull::setup_lp(bool initialize_sorted_d_mat)
if((omp_get_thread_num() == 0) && initialize_sorted_d_mat)
{
prop_sorted_d_mat::initialize_sorted_d_matrix_arr(
prop_sorted_d_mat::N_FEATURES,
_n_task,
_n_class,
n_samp_per_class
);
std::cout << prop_sorted_d_mat::N_TASK << '\t' << prop_sorted_d_mat::N_CLASS << '\t' << prop_sorted_d_mat::N_FEATURES << '\t' << prop_sorted_d_mat::N_SAMPLES << '\t' << prop_sorted_d_mat::SORTED_D_MATRIX.size() << std::endl;
std::max(prop_sorted_d_mat::N_FEATURES, _n_feat),
_n_task,
_n_class,
n_samp_per_class
);
}
}
......
......@@ -124,11 +124,6 @@ namespace
std::fill_n(_prop_test.begin() + 5, 5, 1.0);
std::fill_n(_prop_test.begin() + 10, 5, 2.0);
std::fill_n(_prop_test.begin() + 15, 5, 3.0);
prop_sorted_d_mat::initialize_sorted_d_matrix_arr(2, _task_sizes_train.size(), 4, std::vector<int>(4, 20));
std::copy_n(value_1.data(), _task_sizes_train[0], prop_sorted_d_mat::access_sorted_d_matrix(0));
std::copy_n(value_2.data(), _task_sizes_train[0], prop_sorted_d_mat::access_sorted_d_matrix(1));
std::cout << prop_sorted_d_mat::access_sorted_d_matrix(1)[0] <<'\t' << prop_sorted_d_mat::access_sorted_d_matrix(0)[0] << std::endl;
}
void TearDown() override
......@@ -156,6 +151,8 @@ namespace
_task_sizes_test,
2
);
std::copy_n(_phi[0]->value_ptr(), _task_sizes_train[0], prop_sorted_d_mat::access_sorted_d_matrix(0));
std::copy_n(_phi[1]->value_ptr(), _task_sizes_train[0], prop_sorted_d_mat::access_sorted_d_matrix(1));
EXPECT_EQ(std::floor(loss.project(_phi[0])), 80);
EXPECT_EQ(std::floor(loss.project(_phi[1])), 80);
......@@ -176,6 +173,8 @@ namespace
_task_sizes_test,
2
);
std::copy_n(_phi[0]->value_ptr(), _task_sizes_train[0], prop_sorted_d_mat::access_sorted_d_matrix(0));
std::copy_n(_phi[1]->value_ptr(), _task_sizes_train[0], prop_sorted_d_mat::access_sorted_d_matrix(1));
LossFunctionConvexHull loss_copy(std::make_shared<LossFunctionConvexHull>(loss));
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment