Commit 6f8b8658 authored by Thomas Purcell

Add test for reorder_by_n_params

Unit test now works
parent 7c50a993
@@ -576,21 +576,17 @@ void FeatureSpace::generate_non_param_feats(
#ifdef PARAMETERIZE
int FeatureSpace::reorder_by_n_params(std::vector<node_ptr>& feat_set, int start)
{
std::cout << "in" << std::endl;
std::vector<int> feat_n_params(feat_set.size() - start);
std::cout << "transform 1" << std::endl;
std::transform(
feat_set.begin() + start,
feat_set.end(),
feat_n_params.begin(),
[](node_ptr feat){return feat->n_params();}
);
std::cout << "inds 1" << std::endl;
std::vector<int> inds = util_funcs::argsort<int>(feat_n_params);
std::vector<node_ptr> feat_set_copy(feat_n_params.size());
std::copy_n(feat_set.begin() + start, feat_n_params.size(), feat_set_copy.begin());
std::cout << "transform 2" << std::endl;
std::transform(
inds.begin(),
inds.end(),
@@ -604,7 +600,6 @@ int FeatureSpace::reorder_by_n_params(std::vector<node_ptr>& feat_set, int start
}
// Return the boundary: start plus the number of features with no parameters
std::cout << "ret" << std::endl;
return start + std::count_if(
feat_n_params.begin(),
feat_n_params.end(),
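The function above follows a standard argsort-then-gather pattern: compute each feature's parameter count, argsort those counts, rebuild the tail of the feature set in sorted order, and return the boundary between zero-parameter and parameterized features. A minimal standalone sketch of the same pattern, with a hypothetical Feature stand-in instead of the project's node_ptr, and util_funcs::argsort emulated with std::iota plus std::stable_sort:

#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

struct Feature
{
    int n_params; // number of free parameters (hypothetical stand-in)
};

// Reorder feats[start..end) so zero-parameter features come first and
// return the index one past the last zero-parameter feature.
int reorder_by_n_params_sketch(std::vector<Feature>& feats, int start)
{
    const int n = static_cast<int>(feats.size()) - start;

    // Sort key: the parameter count of each feature in the tail
    std::vector<int> n_params(n);
    std::transform(
        feats.begin() + start,
        feats.end(),
        n_params.begin(),
        [](const Feature& f){ return f.n_params; }
    );

    // argsort: indices into n_params, ordered so the keys ascend
    std::vector<int> inds(n);
    std::iota(inds.begin(), inds.end(), 0);
    std::stable_sort(
        inds.begin(),
        inds.end(),
        [&n_params](int a, int b){ return n_params[a] < n_params[b]; }
    );

    // Gather: rebuild the tail in sorted order from a copy
    std::vector<Feature> copy(feats.begin() + start, feats.end());
    std::transform(
        inds.begin(),
        inds.end(),
        feats.begin() + start,
        [&copy](int ind){ return copy[ind]; }
    );

    // Boundary: start plus the number of zero-parameter features
    return start + static_cast<int>(std::count_if(
        n_params.begin(),
        n_params.end(),
        [](int np){ return np == 0; }
    ));
}

int main()
{
    std::vector<Feature> feats{{0}, {2}, {0}, {4}, {0}};
    std::cout << reorder_by_n_params_sketch(feats, 0) << std::endl; // prints 3
}

Copying the tail before the gather is what makes the in-place overwrite safe, mirroring feat_set_copy above: writing through feats.begin() + start while reading from the same range would clobber elements before they are read.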
@@ -993,14 +988,18 @@ void FeatureSpace::generate_feature_space(
}
}
}
#pragma omp parallel for
for(int ff = start_rung.back(); ff < feat_set.size(); ++ff)
{
feat_set[ff]->reindex(ff + n_feat_below_rank, ff);
feat_set[ff]->set_value();
feat_set[ff]->set_test_value();
}
#ifdef PARAMETERIZE
if(!reparam)
{
int no_param_sz = reorder_by_n_params(feat_set, start_rung.back());
_end_no_params.push_back(no_param_sz);
}
#endif
}
}
}
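For the boundary recorded in _end_no_params, sorting by parameter count and then counting the zeros behaves like a stable partition on "has no parameters" (the sort additionally orders the parameterized group by its counts). A minimal sketch of that equivalence, reusing the hypothetical Feature stand-in from the sketch above rather than the project's node_ptr API:

#include <algorithm>
#include <iterator>
#include <vector>

struct Feature
{
    int n_params; // number of free parameters (hypothetical stand-in)
};

// Move zero-parameter features ahead of parameterized ones, preserving
// relative order within each group, and return the boundary index.
int no_param_boundary(std::vector<Feature>& feats, int start)
{
    auto bound = std::stable_partition(
        feats.begin() + start,
        feats.end(),
        [](const Feature& f){ return f.n_params == 0; }
    );
    return static_cast<int>(std::distance(feats.begin(), bound));
}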
@@ -305,4 +305,54 @@ namespace
EXPECT_EQ(phi.size(), 6);
}
#ifdef PARAMETERIZE
TEST_F(FeatSpaceTest, ReorderByNparamsTest)
{
// Configure a small feature space: rung-2 features, parameterized
// operators nested at most two deep, no rung storage or on-the-fly
// generation
nlopt_wrapper::MAX_PARAM_DEPTH = 2;
node_value_arrs::initialize_param_storage();
_inputs.set_max_rung(2);
_inputs.set_max_param_depth(2);
_inputs.set_n_rung_store(0);
_inputs.set_n_rung_generate(0);
_inputs.set_prop_train(_prop);
FeatureSpace feat_space(_inputs);

// Seed the candidate list with the three primary features
std::vector<node_ptr> phi(8);
phi[0] = feat_space.phi0()[0];
phi[1] = feat_space.phi0()[1];
phi[2] = feat_space.phi0()[2];
phi[0]->set_value();
phi[1]->set_value();
phi[2]->set_value();

// Synthetic properties the parameterized nodes can fit: a shifted cube
// of feature 1 for CbParamNode, and an inverse of its square for
// InvParamNode
double* val_ptr = feat_space.phi0()[1]->value_ptr();
std::vector<double> prop_cb(_prop.size(), 0.0);
std::transform(val_ptr, val_ptr + prop_cb.size(), prop_cb.begin(), [](double val){return std::pow(val + 2.0, 3.0);});
std::shared_ptr<NLOptimizer> optimizer_cb = nlopt_wrapper::get_optimizer("regression", _inputs.task_sizes_train(), prop_cb, 2);
std::vector<double> prop_lor(_prop.size(), 0.0);
std::transform(val_ptr, val_ptr + prop_lor.size(), prop_lor.begin(), [](double val){return 1.0 / (2.0 + std::pow(val, 2.0));});
std::shared_ptr<NLOptimizer> optimizer_lor = nlopt_wrapper::get_optimizer("regression", _inputs.task_sizes_train(), prop_lor, 2);

// Generated features: a mix of non-parameterized nodes (0 parameters)
// and parameterized nodes (2 parameters per node within MAX_PARAM_DEPTH)
phi[3] = std::make_shared<CbNode>(phi[1], 5, 1e-50, 1e50); // 0 params
phi[4] = std::make_shared<CbParamNode>(phi[1], 6, 1e-50, 1e50, optimizer_cb); // 2 params
phi[5] = std::make_shared<SqNode>(phi[1], 7, 1e-50, 1e50); // 0 params
phi[6] = std::make_shared<InvParamNode>(phi[5], 9, 1e-50, 1e50, optimizer_lor); // 4 params (its own plus its SqNode child's)
phi[7] = std::make_shared<InvNode>(phi[5], 8, 1e-50, 1e50); // 0 params

// Reordering only indices 5-7 moves the zero-parameter SqNode and
// InvNode ahead of InvParamNode, so the no-parameter boundary is 7
EXPECT_EQ(feat_space.reorder_by_n_params(phi, 5), 7);
EXPECT_EQ(phi[4]->n_params(), 2);
EXPECT_EQ(phi[6]->n_params(), 0);
EXPECT_EQ(phi[7]->n_params(), 4);

// Reordering the full set leaves the six zero-parameter features in
// front, followed by CbParamNode (2 params) and InvParamNode (4 params)
EXPECT_EQ(feat_space.reorder_by_n_params(phi, 0), 6);
EXPECT_EQ(phi[4]->n_params(), 0);
EXPECT_EQ(phi[6]->n_params(), 2);
EXPECT_EQ(phi[7]->n_params(), 4);
}
#endif
}