Commit 6b1f9b3c authored by Thomas Purcell

Finalize all global variables after each Google Test

Prevent global variables from persisting across tests
parent 3ecfcb1b
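
The pattern applied throughout the test suite below is the standard Google Test `TearDown()` hook: every global container a fixture (or test body) initializes is restored to its empty state before the next test runs, so tests no longer depend on execution order. A minimal sketch of that pattern, built only from the `node_value_arrs` and `prop_sorted_d_mat` calls that appear in this diff; the fixture and test names are illustrative, and the project-specific headers are left as a placeholder comment:

```cpp
#include <gtest/gtest.h>
#include <vector>
// ... plus the project headers that declare node_value_arrs and prop_sorted_d_mat

namespace
{
    // Illustrative fixture (not part of this commit) showing the SetUp/TearDown pairing.
    class ExampleGlobalStateTest : public ::testing::Test
    {
    protected:
        void SetUp() override
        {
            // Populate the global value arrays the way an individual test needs them.
            std::vector<int> task_sizes_train = {80};
            std::vector<int> task_sizes_test = {20};
            node_value_arrs::initialize_values_arr(task_sizes_train, task_sizes_test, 2, 2, false);
            node_value_arrs::initialize_d_matrix_arr();
        }

        void TearDown() override
        {
            // Reset every global this suite may touch so no state leaks into
            // whichever test happens to run next.
            prop_sorted_d_mat::finalize_sorted_d_matrix_arr();
            node_value_arrs::finalize_values_arr();
        }
    };

    TEST_F(ExampleGlobalStateTest, GlobalsStartClean)
    {
        // Relies on the invariant this commit establishes: any previously used
        // sorted descriptor matrix has been finalized, so N_FEATURES is 0 here.
        EXPECT_EQ(prop_sorted_d_mat::N_FEATURES, 0);
    }
}
```
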
@@ -31,7 +31,7 @@ int prop_sorted_d_mat::N_CLASS = 0;
int prop_sorted_d_mat::N_FEATURES = 0;
int prop_sorted_d_mat::N_SAMPLES = 0;
void prop_sorted_d_mat::initialize_sroted_d_matrix_arr(int n_feat, int n_task, int n_class, std::vector<int> n_samples_per_class)
void prop_sorted_d_mat::initialize_sorted_d_matrix_arr(int n_feat, int n_task, int n_class, std::vector<int> n_samples_per_class)
{
if(n_samples_per_class.size() != n_task * n_class)
{
@@ -51,11 +51,24 @@ void prop_sorted_d_mat::initialize_sroted_d_matrix_arr(int n_feat, int n_task, i
}
N_SAMPLES = std::accumulate(N_SAMPLES_PER_CLASS.begin(), N_SAMPLES_PER_CLASS.end(), 0);
SORTED_D_MATRIX.resize(N_FEATURES * N_SAMPLES);
SORTED_D_MATRIX = std::vector<double>(N_FEATURES * N_SAMPLES, 0.0);
}
void prop_sorted_d_mat::resize_sroted_d_matrix_arr(int n_feats)
void prop_sorted_d_mat::resize_sorted_d_matrix_arr(int n_feats)
{
N_FEATURES = n_feats;
SORTED_D_MATRIX.resize(N_FEATURES * N_SAMPLES);
}
void prop_sorted_d_mat::finalize_sorted_d_matrix_arr()
{
SORTED_D_MATRIX.resize(0);
CLASS_START.resize(0);
N_SAMPLES_PER_CLASS.resize(0);
N_TASK = 0;
N_CLASS = 0;
N_FEATURES = 0;
N_SAMPLES = 0;
}
@@ -48,14 +48,19 @@ namespace prop_sorted_d_mat
* @param n_class The number of classes
* @param n_samples_per_class The number of samples in each class/task combination [task * N_TASK * N_CLASS + class]
*/
void initialize_sroted_d_matrix_arr(int n_feat, int n_task, int n_class, std::vector<int> n_samples_per_class);
void initialize_sorted_d_matrix_arr(int n_feat, int n_task, int n_class, std::vector<int> n_samples_per_class);
/**
* @brief Resize the descriptor matrix to match the current number of features
*
* @param n_feats Number of features to select
*/
void resize_sroted_d_matrix_arr(int n_feats);
void resize_sorted_d_matrix_arr(int n_feats);
/**
* @brief Restores all global variables to initial state
*/
void finalize_sorted_d_matrix_arr();
/**
* @brief Get the number of samples in a particular task/class combination
@@ -100,7 +100,7 @@ int SISSOClassifier::get_max_error_ind(
void SISSOClassifier::transfer_d_mat_to_sorted() const
{
prop_sorted_d_mat::resize_sroted_d_matrix_arr(node_value_arrs::N_SELECTED);
prop_sorted_d_mat::resize_sorted_d_matrix_arr(node_value_arrs::N_SELECTED);
for(auto& el : _sample_inds_to_sorted_dmat_inds)
{
dcopy_(
@@ -245,7 +245,7 @@ void node_value_arrs::resize_d_matrix_arr(const int n_select)
D_MATRIX.shrink_to_fit();
}
void node_value_arrs::finialize_values_arr()
void node_value_arrs::finalize_values_arr()
{
N_SELECTED = 0;
N_SAMPLES = 0;
@@ -76,7 +76,7 @@ namespace node_value_arrs
/**
* @brief Resize all storage arrays to be empty
*/
void finialize_values_arr();
void finalize_values_arr();
/**
* @brief Initialize all central storage vectors/descriptive variables without changing MAX_RUNG
@@ -228,7 +228,7 @@ void LossFunctionConvexHull::setup_lp(bool initialize_sorted_d_mat)
if(prop_sorted_d_mat::N_FEATURES == 0)
{
prop_sorted_d_mat::initialize_sroted_d_matrix_arr(0, _n_task, _n_class, n_samp_per_class);
prop_sorted_d_mat::initialize_sorted_d_matrix_arr(0, _n_task, _n_class, n_samp_per_class);
}
}
@@ -121,6 +121,8 @@ int main(int argc, char const *argv[])
std::cout << sisso.models()[ii][0] << "\n" << std::endl;
}
}
prop_sorted_d_mat::finalize_sorted_d_matrix_arr();
comp_feats::reset_vectors();
}
mpi_setup::finalize_mpi_env();
return 0;
@@ -36,16 +36,24 @@ void mpi_reduce_op::set_op(std::string project_type, double cross_cor_max, int n
if(project_type.compare("classification") == 0)
{
if(CROSS_COR_MAX < 0.99999)
{
IS_VALID = comp_feats::valid_feature_against_selected_spearman_mpi_op;
}
else
{
IS_VALID = comp_feats::valid_feature_against_selected_spearman_max_corr_1_mpi_op;
}
}
else
{
if(CROSS_COR_MAX < 0.99999)
{
IS_VALID = comp_feats::valid_feature_against_selected_pearson_mpi_op;
}
else
{
IS_VALID = comp_feats::valid_feature_against_selected_pearson_max_corr_1_mpi_op;
}
}
}
@@ -110,7 +110,7 @@ void sisso::register_all()
def(
"finalize_values_arr",
&node_value_arrs::finialize_values_arr,
&node_value_arrs::finalize_values_arr,
"@DocString_node_vals_finalize@"
);
@@ -21,7 +21,6 @@
#include "utils/compare_features.hpp"
#include <iomanip>
std::vector<double> comp_feats::CORR_CHECK;
std::vector<double> comp_feats::RANK;
std::vector<int> comp_feats::INDEX;
@@ -64,6 +63,11 @@ void comp_feats::set_is_valid_fxn(
}
}
void comp_feats::reset_vectors()
{
RANK.resize(0);
INDEX.resize(0);
}
bool comp_feats::valid_feature_against_selected_pearson_max_corr_1(
const double* val_ptr,
@@ -30,7 +30,6 @@ typedef std::tuple<node_ptr, double> node_sc_pair;
namespace comp_feats
{
extern std::vector<double> CORR_CHECK; //!< Vector for storing cross_corelation values
extern std::vector<double> RANK; //!< Global variable used to store the rank variables for Spearman correlation
extern std::vector<int> INDEX; //!< Global variable used to store the sorting indexes for Spearman correlation
@@ -51,6 +50,11 @@ namespace comp_feats
std::function<bool(const double*, const int, const double, const std::vector<node_ptr>&, const std::vector<double>&, const double)>& is_valid_feat_list
);
/**
* @brief Reset the RANK and INDEX vectors to be size 0
*/
void reset_vectors();
/**
* @brief Checks the feature to see if it is still valid against previously selected features (using the Pearson correlation coefficient with a maximum cross-correlation of 1.0)
*
@@ -24,7 +24,6 @@ namespace
protected:
void SetUp() override
{
node_value_arrs::initialize_d_matrix_arr();
mpi_setup::init_mpi_env();
_task_sizes_train = {40, 40};
@@ -68,6 +67,11 @@ namespace
};
}
void TearDown() override
{
node_value_arrs::finalize_values_arr();
}
std::vector<FeatureNode> _phi;
std::vector<double> _prop;
std::vector<int> _task_sizes_train;
@@ -25,13 +25,14 @@ namespace
protected:
void SetUp() override
{
node_value_arrs::initialize_d_matrix_arr();
mpi_setup::init_mpi_env();
std::vector<int> task_sizes_train = {80};
std::vector<int> task_sizes_test = {20};
node_value_arrs::initialize_values_arr(task_sizes_train, task_sizes_test, 2, 2, false);
node_value_arrs::initialize_d_matrix_arr();
node_value_arrs::resize_d_matrix_arr(2);
std::vector<double> value_1(task_sizes_train[0], 0.0);
std::vector<double> value_2(task_sizes_train[0], 0.0);
@@ -93,6 +94,7 @@ namespace
FeatureNode(1, "B", value_2, test_value_2, Unit("m"))
};
_tol = 1e-5;
_error.resize(80, 0.0);
_test_error.resize(20, 0.0);
_task_num = 0;
@@ -104,11 +106,17 @@ namespace
_samp_per_class = std::vector<int>(4, 20);
_samp_per_class_test = std::vector<int>(4, 5);
prop_sorted_d_mat::initialize_sroted_d_matrix_arr(2, 1, 4, _samp_per_class);
prop_sorted_d_mat::initialize_sorted_d_matrix_arr(2, 1, 4, _samp_per_class);
std::copy_n(value_1.data(), value_1.size(), prop_sorted_d_mat::access_sorted_d_matrix(0));
std::copy_n(value_2.data(), value_2.size(),prop_sorted_d_mat::access_sorted_d_matrix(1));
}
void TearDown() override
{
prop_sorted_d_mat::finalize_sorted_d_matrix_arr();
node_value_arrs::finalize_values_arr();
}
std::vector<FeatureNode> _phi;
std::vector<double> _error;
@@ -38,7 +38,7 @@ namespace
TEST_F(PropSOrtedDMatTests, TestSortedDMat)
{
prop_sorted_d_mat::initialize_sroted_d_matrix_arr(1, _n_task, _n_class, _n_samples_per_class);
prop_sorted_d_mat::initialize_sorted_d_matrix_arr(1, _n_task, _n_class, _n_samples_per_class);
EXPECT_EQ(prop_sorted_d_mat::CLASS_START[1], 18);
EXPECT_EQ(prop_sorted_d_mat::N_SAMPLES_PER_CLASS[1], 19);
EXPECT_EQ(prop_sorted_d_mat::N_FEATURES, 1);
@@ -47,7 +47,7 @@ namespace
EXPECT_EQ(prop_sorted_d_mat::N_SAMPLES, std::accumulate(_n_samples_per_class.begin(), _n_samples_per_class.end(), 0));
EXPECT_EQ(prop_sorted_d_mat::SORTED_D_MATRIX.size(), 80);
prop_sorted_d_mat::resize_sroted_d_matrix_arr(_n_feat);
prop_sorted_d_mat::resize_sorted_d_matrix_arr(_n_feat);
EXPECT_EQ(prop_sorted_d_mat::N_FEATURES, _n_feat);
EXPECT_EQ(prop_sorted_d_mat::SORTED_D_MATRIX.size(), 80 * _n_feat);
@@ -61,5 +61,6 @@ namespace
prop_sorted_d_mat::access_sample_sorted_d_matrix(2)[0] = 1.5;
EXPECT_EQ(prop_sorted_d_mat::access_sample_sorted_d_matrix(2, 0, 0)[0], 1.5);
prop_sorted_d_mat::finalize_sorted_d_matrix_arr();
}
}
@@ -113,6 +113,11 @@ namespace
std::fill_n(_prop_test.begin() + 15, 5, 3.0);
}
void TearDown() override
{
node_value_arrs::finalize_values_arr();
}
std::vector<FeatureNode> _phi;
std::vector<double> _prop;
@@ -24,13 +24,13 @@ namespace
void SetUp() override
{
allowed_op_maps::set_node_maps();
node_value_arrs::initialize_d_matrix_arr();
mpi_setup::init_mpi_env();
std::vector<int> task_sizes_train = {80};
std::vector<int> task_sizes_test = {20};
node_value_arrs::initialize_values_arr(task_sizes_train, task_sizes_test, 3, 2, false);
node_value_arrs::initialize_d_matrix_arr();
std::vector<std::string> sample_ids_train(task_sizes_train[0]);
for(int ii = 0; ii < task_sizes_train[0]; ++ii)
@@ -161,6 +161,12 @@ namespace
inputs.set_n_models_store(3);
}
void TearDown() override
{
prop_sorted_d_mat::finalize_sorted_d_matrix_arr();
node_value_arrs::finalize_values_arr();
}
InputParser inputs;
};
@@ -24,13 +24,13 @@ namespace
void SetUp() override
{
allowed_op_maps::set_node_maps();
node_value_arrs::initialize_d_matrix_arr();
mpi_setup::init_mpi_env();
std::vector<int> task_sizes_train = {90};
std::vector<int> task_sizes_test = {10};
node_value_arrs::initialize_values_arr(task_sizes_train, task_sizes_test, 3, 2, false);
node_value_arrs::initialize_d_matrix_arr();
std::vector<int> leave_out_inds = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -121,6 +121,12 @@ namespace
inputs.set_n_residual(2);
inputs.set_n_models_store(3);
}
void TearDown() override
{
node_value_arrs::finalize_values_arr();
}
InputParser inputs;
std::vector<double> _prop;
@@ -147,6 +147,12 @@ namespace
inputs.set_n_residual(2);
inputs.set_n_models_store(3);
}
void TearDown() override
{
node_value_arrs::finalize_values_arr();
}
InputParser inputs;
std::vector<double> _prop;
@@ -46,6 +46,11 @@ namespace
_phi.push_back(std::make_shared<SubNode>(_feat_1, _feat_2, 4, -1e-50, 1e50));
}
void TearDown() override
{
node_value_arrs::finalize_values_arr();
}
node_ptr _feat_1;
node_ptr _feat_2;
node_ptr _feat_3;
@@ -43,6 +43,11 @@ namespace
_phi.push_back(std::make_shared<AbsDiffNode>(_feat_1, _feat_3, 3, 1e-50, 1e50));
}
void TearDown() override
{
node_value_arrs::finalize_values_arr();
}
node_ptr _feat_1;
node_ptr _feat_2;
node_ptr _feat_3;