Skip to content
Snippets Groups Projects
Commit 09ae63e9 authored by Thomas Purcell's avatar Thomas Purcell
Browse files

Add Unit Tests for the LossFunctions

parent 4c457119
No related branches found
No related tags found
No related merge requests found
// Copyright 2021 Thomas A. R. Purcell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "loss_function/LossFunctionConvexHull.hpp"
#include "mpi_interface/MPI_Interface.hpp"
#include "gtest/gtest.h"
#include <random>
namespace
{
// Test fixture for LossFunctionConvexHull: builds an 80-sample / 4-class
// training set (and a 20-sample test set) whose two features place each
// class in its own quadrant of the (feature A, feature B) plane.
class LossFunctionConvexHullTests : public ::testing::Test
{
protected:
    void SetUp() override
    {
        mpi_setup::init_mpi_env();

        _task_sizes_train = {80};
        _task_sizes_test = {20};

        // Global value storage / descriptor matrix for two features
        node_value_arrs::initialize_values_arr(_task_sizes_train, _task_sizes_test, 2, 2, false);
        node_value_arrs::initialize_d_matrix_arr();
        node_value_arrs::resize_d_matrix_arr(2);

        const int n_train = _task_sizes_train[0];
        const int n_test = _task_sizes_test[0];

        std::vector<double> feat_a(n_train, 0.0);
        std::vector<double> feat_b(n_train, 0.0);
        std::vector<double> feat_a_test(n_test, 0.0);
        std::vector<double> feat_b_test(n_test, 0.0);

        std::default_random_engine generator;
        std::uniform_real_distribution<double> distribution_12_pos(1.0, 2.0);
        std::uniform_real_distribution<double> distribution_12_neg(-2.0, -1.0);

        // Fill rows [begin, end) of both feature vectors, drawing for a then b
        // on each row (same interleaved order as before, so the default-seeded
        // generator reproduces the identical data set).
        auto fill_rows = [&generator](std::vector<double>& a,
                                      std::vector<double>& b,
                                      const int begin,
                                      const int end,
                                      std::uniform_real_distribution<double>& dist_a,
                                      std::uniform_real_distribution<double>& dist_b)
        {
            for (int row = begin; row < end; ++row)
            {
                a[row] = dist_a(generator);
                b[row] = dist_b(generator);
            }
        };

        // Training set: four quadrants of 20 samples each; the first two rows
        // of each block are pinned just outside the sampled [1, 2] bands.
        fill_rows(feat_a, feat_b, 0, 20, distribution_12_neg, distribution_12_neg);
        feat_a[0] = -0.99;
        feat_a[1] = -2.01;
        feat_b[0] = -0.99;
        feat_b[1] = -2.01;

        fill_rows(feat_a, feat_b, 20, 40, distribution_12_pos, distribution_12_pos);
        feat_a[20] = 0.99;
        feat_a[21] = 2.01;
        feat_b[20] = 0.99;
        feat_b[21] = 2.01;

        fill_rows(feat_a, feat_b, 40, 60, distribution_12_neg, distribution_12_pos);
        feat_a[40] = -0.99;
        feat_a[41] = -2.01;
        feat_b[40] = 0.99;
        feat_b[41] = 2.01;

        fill_rows(feat_a, feat_b, 60, 80, distribution_12_pos, distribution_12_neg);
        feat_a[60] = 0.99;
        feat_a[61] = 2.01;
        feat_b[60] = -0.99;
        feat_b[61] = -2.01;

        // Test set: the same four quadrants, 5 samples each
        fill_rows(feat_a_test, feat_b_test, 0, 5, distribution_12_neg, distribution_12_neg);
        fill_rows(feat_a_test, feat_b_test, 5, 10, distribution_12_pos, distribution_12_pos);
        fill_rows(feat_a_test, feat_b_test, 10, 15, distribution_12_neg, distribution_12_pos);
        fill_rows(feat_a_test, feat_b_test, 15, 20, distribution_12_pos, distribution_12_neg);

        _phi.push_back(std::make_shared<FeatureNode>(0, "A", feat_a, feat_a_test, Unit("m")));
        _phi.push_back(std::make_shared<FeatureNode>(1, "B", feat_b, feat_b_test, Unit("m")));
        _model_phi.push_back(std::make_shared<ModelNode>(_phi[0]));
        _model_phi.push_back(std::make_shared<ModelNode>(_phi[1]));

        // Mirror the training feature values into the descriptor matrix
        std::copy_n(feat_a.data(), n_train, node_value_arrs::get_d_matrix_ptr(0));
        std::copy_n(feat_b.data(), n_train, node_value_arrs::get_d_matrix_ptr(1));

        // Class labels 0..3 in blocks of 20 (train) / 5 (test)
        _prop_train.resize(n_train, 0.0);
        std::fill(_prop_train.begin() + 20, _prop_train.begin() + 40, 1.0);
        std::fill(_prop_train.begin() + 40, _prop_train.begin() + 60, 2.0);
        std::fill(_prop_train.begin() + 60, _prop_train.begin() + 80, 3.0);

        _prop_test.resize(n_test, 0.0);
        std::fill(_prop_test.begin() + 5, _prop_test.begin() + 10, 1.0);
        std::fill(_prop_test.begin() + 10, _prop_test.begin() + 15, 2.0);
        std::fill(_prop_test.begin() + 15, _prop_test.begin() + 20, 3.0);
    }

    std::vector<node_ptr> _phi;              // feature space (two features)
    std::vector<model_node_ptr> _model_phi;  // model-node copies of _phi
    std::vector<double> _prop_train;         // training class labels
    std::vector<double> _prop_test;          // test class labels
    std::vector<int> _task_sizes_train;      // samples per task (train)
    std::vector<int> _task_sizes_test;       // samples per task (test)
};
// Check a freshly constructed LossFunctionConvexHull on the perfectly
// separable fixture data: both features should project to a floored score of
// 80 (the number of training samples), all loss evaluations should be zero,
// and the metadata (class count, loss type) should be reported correctly.
// NOTE: test name fixed from the misspelled "NewConstructior".
TEST_F(LossFunctionConvexHullTests, NewConstructor)
{
    LossFunctionConvexHull loss(
        _prop_train,
        _prop_test,
        _task_sizes_train,
        _task_sizes_test,
        2
    );

    // Floored projection score should equal the number of training samples
    EXPECT_EQ(std::floor(loss.project(_phi[0])), 80);
    EXPECT_EQ(std::floor(loss.project(_phi[1])), 80);

    // Perfectly separated classes -> zero loss for all evaluation paths
    EXPECT_EQ(loss({0, 1}), 0);
    EXPECT_EQ(loss(_model_phi), 0);
    EXPECT_EQ(loss.test_loss(_model_phi), 0);

    EXPECT_EQ(loss.n_class(), 4);
    EXPECT_EQ(loss.type(), LOSS_TYPE::CONVEX_HULL);
}
// A LossFunctionConvexHull constructed from a shared_ptr to an existing loss
// must reproduce the original's behavior exactly.
TEST_F(LossFunctionConvexHullTests, CopyTest)
{
    LossFunctionConvexHull original(
        _prop_train,
        _prop_test,
        _task_sizes_train,
        _task_sizes_test,
        2
    );
    LossFunctionConvexHull loss_copy(std::make_shared<LossFunctionConvexHull>(original));

    // Floored projection score should equal the number of training samples
    const double proj_a = loss_copy.project(_phi[0]);
    const double proj_b = loss_copy.project(_phi[1]);
    EXPECT_EQ(std::floor(proj_a), 80);
    EXPECT_EQ(std::floor(proj_b), 80);

    // Perfectly separated classes -> zero loss for all evaluation paths
    EXPECT_EQ(loss_copy({0, 1}), 0);
    EXPECT_EQ(loss_copy(_model_phi), 0);
    EXPECT_EQ(loss_copy.test_loss(_model_phi), 0);

    EXPECT_EQ(loss_copy.n_class(), 4);
    EXPECT_EQ(loss_copy.type(), LOSS_TYPE::CONVEX_HULL);
}
}
// Copyright 2021 Thomas A. R. Purcell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "loss_function/LossFunctionLogPearsonRMSE.hpp"
#include "mpi_interface/MPI_Interface.hpp"
#include "gtest/gtest.h"
#include <random>
namespace
{
// Test fixture for LossFunctionLogPearsonRMSE: two strictly positive random
// features and properties of the form c0 * A^a0 * B^a1 (plus a variant
// without the multiplicative bias c0), which are exactly linear in log space.
class LossFunctionLogPearsonRMSETests : public ::testing::Test
{
protected:
    void SetUp() override
    {
        mpi_setup::init_mpi_env();
        _task_sizes_train = {80};
        _task_sizes_test = {20};

        // Global value storage / descriptor matrix for two features
        node_value_arrs::initialize_values_arr(_task_sizes_train, _task_sizes_test, 2, 2, false);
        node_value_arrs::initialize_d_matrix_arr();
        node_value_arrs::resize_d_matrix_arr(2);

        std::vector<double> value_1(_task_sizes_train[0], 0.0);
        std::vector<double> value_2(_task_sizes_train[0], 0.0);
        std::vector<double> test_value_1(_task_sizes_test[0], 0.0);
        std::vector<double> test_value_2(_task_sizes_test[0], 0.0);

        // Features must be strictly positive so their logarithms are defined
        std::default_random_engine generator;
        std::uniform_real_distribution<double> distribution_feats(0.01, 100.0);
        std::uniform_real_distribution<double> distribution_params(0.9, 1.1);

        for (int ii = 0; ii < _task_sizes_train[0]; ++ii)
        {
            value_1[ii] = distribution_feats(generator);
            value_2[ii] = distribution_feats(generator);
        }
        for (int ii = 0; ii < _task_sizes_test[0]; ++ii)
        {
            test_value_1[ii] = distribution_feats(generator);
            test_value_2[ii] = distribution_feats(generator);
        }

        // Scratch buffer handed to util_funcs::log_r2 in the tests
        _log_feat.resize(_task_sizes_train[0]);

        _phi.push_back(std::make_shared<FeatureNode>(0, "A", value_1, test_value_1, Unit("m")));
        _phi.push_back(std::make_shared<FeatureNode>(1, "B", value_2, test_value_2, Unit("m")));
        _model_phi.push_back(std::make_shared<ModelNode>(_phi[0]));
        _model_phi.push_back(std::make_shared<ModelNode>(_phi[1]));

        // Mirror the training feature values into the descriptor matrix
        std::copy_n(value_1.data(), _task_sizes_train[0], node_value_arrs::get_d_matrix_ptr(0));
        std::copy_n(value_2.data(), _task_sizes_train[0], node_value_arrs::get_d_matrix_ptr(1));

        const double a0 = distribution_params(generator);
        const double a1 = distribution_params(generator);
        const double c0 = distribution_params(generator);

        // prop = c0 * v0^a0 * v1^a1; the "no bias" variant drops c0
        _prop_train.resize(_task_sizes_train[0], 0.0);
        std::transform(value_1.begin(), value_1.end(), value_2.begin(), _prop_train.begin(), [=](double v0, double v1){return c0 * std::pow(v0, a0) * std::pow(v1, a1);});
        _prop_test.resize(_task_sizes_test[0], 0.0);
        std::transform(test_value_1.begin(), test_value_1.end(), test_value_2.begin(), _prop_test.begin(), [=](double v0, double v1){return c0 * std::pow(v0, a0) * std::pow(v1, a1);});
        _prop_train_no_bias.resize(_task_sizes_train[0], 0.0);
        std::transform(value_1.begin(), value_1.end(), value_2.begin(), _prop_train_no_bias.begin(), [=](double v0, double v1){return std::pow(v0, a0) * std::pow(v1, a1);});
        _prop_test_no_bias.resize(_task_sizes_test[0], 0.0);
        std::transform(test_value_1.begin(), test_value_1.end(), test_value_2.begin(), _prop_test_no_bias.begin(), [=](double v0, double v1){return std::pow(v0, a0) * std::pow(v1, a1);});

        // Log-space properties fed directly to the loss function.
        // BUG FIX: the two test-set vectors below were previously resized with
        // _task_sizes_train[0] (80) instead of _task_sizes_test[0] (20),
        // leaving 60 spurious zero entries after the 20 transformed values.
        _log_prop_train.resize(_task_sizes_train[0]);
        _log_prop_test.resize(_task_sizes_test[0]);
        _log_prop_train_no_bias.resize(_task_sizes_train[0]);
        _log_prop_test_no_bias.resize(_task_sizes_test[0]);
        std::transform(_prop_train.begin(), _prop_train.end(), _log_prop_train.begin(), [](double p){return std::log(p);});
        std::transform(_prop_test.begin(), _prop_test.end(), _log_prop_test.begin(), [](double p){return std::log(p);});
        std::transform(_prop_train_no_bias.begin(), _prop_train_no_bias.end(), _log_prop_train_no_bias.begin(), [](double p){return std::log(p);});
        std::transform(_prop_test_no_bias.begin(), _prop_test_no_bias.end(), _log_prop_test_no_bias.begin(), [](double p){return std::log(p);});
    }

    std::vector<node_ptr> _phi;                   // feature space (two features)
    std::vector<model_node_ptr> _model_phi;       // model-node copies of _phi
    std::vector<double> _prop_train;              // c0 * A^a0 * B^a1 (train)
    std::vector<double> _prop_test;               // c0 * A^a0 * B^a1 (test)
    std::vector<double> _prop_train_no_bias;      // A^a0 * B^a1 (train)
    std::vector<double> _prop_test_no_bias;       // A^a0 * B^a1 (test)
    std::vector<double> _log_feat;                // scratch buffer for log_r2
    std::vector<double> _log_prop_train;          // log of _prop_train
    std::vector<double> _log_prop_test;           // log of _prop_test
    std::vector<double> _log_prop_train_no_bias;  // log of _prop_train_no_bias
    std::vector<double> _log_prop_test_no_bias;   // log of _prop_test_no_bias
    std::vector<int> _task_sizes_train;           // samples per task (train)
    std::vector<int> _task_sizes_test;            // samples per task (test)
};
// With a free intercept, each feature's projection should equal the negative
// log-space Pearson r^2 against the property, and the exact power-law model
// should be fit to numerical precision.
TEST_F(LossFunctionLogPearsonRMSETests, NoFixIntercept)
{
    LossFunctionLogPearsonRMSE loss(
        _log_prop_train,
        _log_prop_test,
        _task_sizes_train,
        _task_sizes_test,
        false,
        2
    );

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_log_r2 = -util_funcs::log_r2(
            _phi[ff]->value_ptr(), _log_prop_train.data(), _log_feat.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss.project(_phi[ff]) - neg_log_r2), 1e-10);
    }

    EXPECT_LT(loss({0, 1}), 1e-5);
    EXPECT_LT(loss(_model_phi), 1e-5);
    EXPECT_LT(loss.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss.type(), LOSS_TYPE::LOG_PEARSON_RMSE);
}
// A copy built from a shared_ptr to a free-intercept loss must reproduce the
// original's projections, losses, and type.
TEST_F(LossFunctionLogPearsonRMSETests, CopyNoFixIntercept)
{
    LossFunctionLogPearsonRMSE original(
        _log_prop_train,
        _log_prop_test,
        _task_sizes_train,
        _task_sizes_test,
        false,
        2
    );
    LossFunctionLogPearsonRMSE loss_copy(std::make_shared<LossFunctionLogPearsonRMSE>(original));

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_log_r2 = -util_funcs::log_r2(
            _phi[ff]->value_ptr(), _log_prop_train.data(), _log_feat.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss_copy.project(_phi[ff]) - neg_log_r2), 1e-10);
    }

    EXPECT_LT(loss_copy({0, 1}), 1e-5);
    EXPECT_LT(loss_copy(_model_phi), 1e-5);
    EXPECT_LT(loss_copy.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss_copy.type(), LOSS_TYPE::LOG_PEARSON_RMSE);
}
// With the intercept fixed, the bias-free properties are used; projections
// should still match the negative log-space r^2 and the model should fit to
// numerical precision.
TEST_F(LossFunctionLogPearsonRMSETests, FixIntercept)
{
    LossFunctionLogPearsonRMSE loss(
        _log_prop_train_no_bias,
        _log_prop_test_no_bias,
        _task_sizes_train,
        _task_sizes_test,
        true,
        2
    );

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_log_r2 = -util_funcs::log_r2(
            _phi[ff]->value_ptr(), _log_prop_train_no_bias.data(), _log_feat.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss.project(_phi[ff]) - neg_log_r2), 1e-10);
    }

    EXPECT_LT(loss({0, 1}), 1e-5);
    EXPECT_LT(loss(_model_phi), 1e-5);
    EXPECT_LT(loss.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss.type(), LOSS_TYPE::LOG_PEARSON_RMSE);
}
// A copy built from a shared_ptr to a fixed-intercept loss must reproduce the
// original's projections, losses, and type.
TEST_F(LossFunctionLogPearsonRMSETests, CopyFixIntercept)
{
    LossFunctionLogPearsonRMSE original(
        _log_prop_train_no_bias,
        _log_prop_test_no_bias,
        _task_sizes_train,
        _task_sizes_test,
        true,
        2
    );
    LossFunctionLogPearsonRMSE loss_copy(std::make_shared<LossFunctionLogPearsonRMSE>(original));

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_log_r2 = -util_funcs::log_r2(
            _phi[ff]->value_ptr(), _log_prop_train_no_bias.data(), _log_feat.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss_copy.project(_phi[ff]) - neg_log_r2), 1e-10);
    }

    EXPECT_LT(loss_copy({0, 1}), 1e-5);
    EXPECT_LT(loss_copy(_model_phi), 1e-5);
    EXPECT_LT(loss_copy.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss_copy.type(), LOSS_TYPE::LOG_PEARSON_RMSE);
}
}
// Copyright 2021 Thomas A. R. Purcell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "loss_function/LossFunctionPearsonRMSE.hpp"
#include "mpi_interface/MPI_Interface.hpp"
#include "gtest/gtest.h"
#include <random>
namespace
{
// Test fixture for LossFunctionPearsonRMSE: two random features on [-50, 50]
// and properties that are exact linear combinations c0 + a0*A + a1*B (with a
// bias-free variant a0*A + a1*B).
class LossFunctionPearsonRMSETests : public ::testing::Test
{
protected:
    void SetUp() override
    {
        mpi_setup::init_mpi_env();
        _task_sizes_train = {80};
        _task_sizes_test = {20};

        // Global value storage / descriptor matrix for two features
        node_value_arrs::initialize_values_arr(_task_sizes_train, _task_sizes_test, 2, 2, false);
        node_value_arrs::initialize_d_matrix_arr();
        node_value_arrs::resize_d_matrix_arr(2);

        const int n_train = _task_sizes_train[0];
        const int n_test = _task_sizes_test[0];

        std::vector<double> feat_a(n_train, 0.0);
        std::vector<double> feat_b(n_train, 0.0);
        std::vector<double> feat_a_test(n_test, 0.0);
        std::vector<double> feat_b_test(n_test, 0.0);

        std::default_random_engine generator;
        std::uniform_real_distribution<double> distribution_feats(-50.0, 50.0);
        std::uniform_real_distribution<double> distribution_params(-2.50, 2.50);

        // Draw the features in the same interleaved (a, b) order as before so
        // the default-seeded generator reproduces the identical data set
        for (int row = 0; row < n_train; ++row)
        {
            feat_a[row] = distribution_feats(generator);
            feat_b[row] = distribution_feats(generator);
        }
        for (int row = 0; row < n_test; ++row)
        {
            feat_a_test[row] = distribution_feats(generator);
            feat_b_test[row] = distribution_feats(generator);
        }

        _phi.push_back(std::make_shared<FeatureNode>(0, "A", feat_a, feat_a_test, Unit("m")));
        _phi.push_back(std::make_shared<FeatureNode>(1, "B", feat_b, feat_b_test, Unit("m")));
        _model_phi.push_back(std::make_shared<ModelNode>(_phi[0]));
        _model_phi.push_back(std::make_shared<ModelNode>(_phi[1]));

        // Mirror the training feature values into the descriptor matrix
        std::copy_n(feat_a.data(), n_train, node_value_arrs::get_d_matrix_ptr(0));
        std::copy_n(feat_b.data(), n_train, node_value_arrs::get_d_matrix_ptr(1));

        const double a0 = distribution_params(generator);
        const double a1 = distribution_params(generator);
        const double c0 = distribution_params(generator);

        // prop = c0 + a0*v0 + a1*v1; the "no bias" variant drops c0
        auto linear = [=](double v0, double v1) { return c0 + a0 * v0 + a1 * v1; };
        auto linear_no_bias = [=](double v0, double v1) { return a0 * v0 + a1 * v1; };

        _prop_train.resize(n_train, 0.0);
        std::transform(feat_a.begin(), feat_a.end(), feat_b.begin(), _prop_train.begin(), linear);
        _prop_test.resize(n_test, 0.0);
        std::transform(feat_a_test.begin(), feat_a_test.end(), feat_b_test.begin(), _prop_test.begin(), linear);
        _prop_train_no_bias.resize(n_train, 0.0);
        std::transform(feat_a.begin(), feat_a.end(), feat_b.begin(), _prop_train_no_bias.begin(), linear_no_bias);
        _prop_test_no_bias.resize(n_test, 0.0);
        std::transform(feat_a_test.begin(), feat_a_test.end(), feat_b_test.begin(), _prop_test_no_bias.begin(), linear_no_bias);
    }

    std::vector<node_ptr> _phi;               // feature space (two features)
    std::vector<model_node_ptr> _model_phi;   // model-node copies of _phi
    std::vector<double> _prop_train;          // c0 + a0*A + a1*B (train)
    std::vector<double> _prop_test;           // c0 + a0*A + a1*B (test)
    std::vector<double> _prop_train_no_bias;  // a0*A + a1*B (train)
    std::vector<double> _prop_test_no_bias;   // a0*A + a1*B (test)
    std::vector<int> _task_sizes_train;       // samples per task (train)
    std::vector<int> _task_sizes_test;        // samples per task (test)
};
// With a free intercept, each feature's projection should equal the negative
// Pearson r^2 against the property, and the exact linear model should be fit
// to numerical precision.
TEST_F(LossFunctionPearsonRMSETests, NoFixIntercept)
{
    LossFunctionPearsonRMSE loss(
        _prop_train,
        _prop_test,
        _task_sizes_train,
        _task_sizes_test,
        false,
        2
    );

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_r2 = -util_funcs::r2(
            _phi[ff]->value_ptr(), _prop_train.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss.project(_phi[ff]) - neg_r2), 1e-10);
    }

    EXPECT_LT(loss({0, 1}), 1e-5);
    EXPECT_LT(loss(_model_phi), 1e-5);
    EXPECT_LT(loss.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss.type(), LOSS_TYPE::PEARSON_RMSE);
}
// A copy built from a shared_ptr to a free-intercept loss must reproduce the
// original's projections, losses, and type.
TEST_F(LossFunctionPearsonRMSETests, CopyNoFixIntercept)
{
    LossFunctionPearsonRMSE original(
        _prop_train,
        _prop_test,
        _task_sizes_train,
        _task_sizes_test,
        false,
        2
    );
    LossFunctionPearsonRMSE loss_copy(std::make_shared<LossFunctionPearsonRMSE>(original));

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_r2 = -util_funcs::r2(
            _phi[ff]->value_ptr(), _prop_train.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss_copy.project(_phi[ff]) - neg_r2), 1e-10);
    }

    EXPECT_LT(loss_copy({0, 1}), 1e-5);
    EXPECT_LT(loss_copy(_model_phi), 1e-5);
    EXPECT_LT(loss_copy.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss_copy.type(), LOSS_TYPE::PEARSON_RMSE);
}
// With the intercept fixed, the bias-free properties are used; projections
// should still match the negative Pearson r^2 and the model should fit to
// numerical precision.
TEST_F(LossFunctionPearsonRMSETests, FixIntercept)
{
    LossFunctionPearsonRMSE loss(
        _prop_train_no_bias,
        _prop_test_no_bias,
        _task_sizes_train,
        _task_sizes_test,
        true,
        2
    );

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_r2 = -util_funcs::r2(
            _phi[ff]->value_ptr(), _prop_train_no_bias.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss.project(_phi[ff]) - neg_r2), 1e-10);
    }

    EXPECT_LT(loss({0, 1}), 1e-5);
    EXPECT_LT(loss(_model_phi), 1e-5);
    EXPECT_LT(loss.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss.type(), LOSS_TYPE::PEARSON_RMSE);
}
// A copy built from a shared_ptr to a fixed-intercept loss must reproduce the
// original's projections, losses, and type.
TEST_F(LossFunctionPearsonRMSETests, CopyFixIntercept)
{
    LossFunctionPearsonRMSE original(
        _prop_train_no_bias,
        _prop_test_no_bias,
        _task_sizes_train,
        _task_sizes_test,
        true,
        2
    );
    LossFunctionPearsonRMSE loss_copy(std::make_shared<LossFunctionPearsonRMSE>(original));

    for (int ff = 0; ff < 2; ++ff)
    {
        const double neg_r2 = -util_funcs::r2(
            _phi[ff]->value_ptr(), _prop_train_no_bias.data(), _task_sizes_train[0]
        );
        EXPECT_LT(std::abs(loss_copy.project(_phi[ff]) - neg_r2), 1e-10);
    }

    EXPECT_LT(loss_copy({0, 1}), 1e-5);
    EXPECT_LT(loss_copy(_model_phi), 1e-5);
    EXPECT_LT(loss_copy.test_loss(_model_phi), 1e-5);
    EXPECT_EQ(loss_copy.type(), LOSS_TYPE::PEARSON_RMSE);
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment