Commit ed8541a2 authored by Thomas Purcell's avatar Thomas Purcell
Browse files

Update googletests to not use EQ

Subtract and check the absolute difference is less than 1e-10
parent 4599cf13
......@@ -108,7 +108,7 @@ test-intel-pytest-py:
- export PATH=$INTEL_COMP_ROOT/bin/:$INTEL_COMP_ROOT/bin/intel64:$I_MPI_ROOT/bin:$PATH
- export OMP_NUM_THREADS=2
- export OMP_PLACES=cores
- pytest --ignore=tests/pytest/test_param.py --ignore=tests/pytest/test_feature_creation/test_parameterize/ --ignore=tests/pytest/test_model_eval/test_param_model_node/ --ignore=tests/pytest/test_model_eval/test_models/test_reg_model_param.py tests/pytest
- pytest --ignore=tests/pytest/test_param.py --ignore=tests/pytest/test_feature_creation/test_parameterize/ --ignore=tests/pytest/test_model_eval/test_param_model_node/ --ignore=tests/pytest/test_model_eval/test_models/test_reg_model_param.py --ignore=tests/pytest/test_nl_opt/ tests/pytest
test-intel-pytest-param-py:
stage: unit_test
......@@ -268,7 +268,7 @@ test-gnu-pytest-py:
- export PYTHONPATH=$HOME/intel/oneapi/intelpython/latest/lib/python3.7/site-packages/:cpp_sisso_gnu_py_env/lib/python3.7/site-packages/
- export OMP_NUM_THREADS=2
- export OMP_PLACES=cores
- pytest --ignore=tests/pytest/test_param.py --ignore=tests/pytest/test_feature_creation/test_parameterize/ --ignore=tests/pytest/test_model_eval/test_param_model_node/ --ignore=tests/pytest/test_model_eval/test_models/test_reg_model_param.py tests/pytest
- pytest --ignore=tests/pytest/test_param.py --ignore=tests/pytest/test_feature_creation/test_parameterize/ --ignore=tests/pytest/test_model_eval/test_param_model_node/ --ignore=tests/pytest/test_model_eval/test_models/test_reg_model_param.py --ignore=tests/pytest/test_nl_opt/ tests/pytest
test-gnu-pytest-param-py:
stage: unit_test
......
......@@ -385,7 +385,7 @@ void FeatureSpace::generate_feature_space(
node_value_arrs::clear_temp_reg();
double start = omp_get_wtime();
#ifdef PARAMETERIZE
#pragma omp parallel firstprivate(feat_ind, l_bound, u_bound)
#pragma omp parallel firstprivate(feat_ind)
{
std::vector<node_ptr> next_phi_private;
std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes_train, _prop_train, _max_rung, _max_param_depth);
......@@ -421,7 +421,7 @@ void FeatureSpace::generate_feature_space(
next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
}
#else
#pragma omp parallel firstprivate(feat_ind, l_bound, u_bound)
#pragma omp parallel firstprivate(feat_ind)
{
std::vector<node_ptr> next_phi_private;
#ifdef OMP45
......
......@@ -173,8 +173,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = -1.0 * util_funcs::sign(v1 - (alpha * v2 + a));
EXPECT_EQ(_gradient[0], df_dp * v2);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v2), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(AbsDiffParamNodeTest, AttributesTest)
......
......@@ -158,8 +158,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = util_funcs::sign(alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(AbsParamNodeTest, AttributesTest)
......
......@@ -171,8 +171,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 1.0;
EXPECT_EQ(_gradient[0], df_dp * v2);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v2), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(AddParamNodeTest, AttributesTest)
......
......@@ -173,8 +173,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 3.0 * std::pow(alpha * v1 + a, 2.0);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(CbParamNodeTest, AttributesTest)
......
......@@ -170,8 +170,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 1.0 / 3.0 * std::pow(alpha * v1 + a, -2.0 / 3.0);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(CbrtParamNodeTest, AttributesTest)
......
......@@ -198,8 +198,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = -1.0 * std::sin(alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(CosParamNodeTest, AttributesTest)
......
......@@ -171,8 +171,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = -1.0 * v1 / std::pow(alpha * v2 + a, 2.0);
EXPECT_EQ(_gradient[0], df_dp * v2);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v2), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(DivParamNodeTest, AttributesTest)
......
......@@ -211,8 +211,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = std::exp(alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(ExpParamNodeTest, AttributesTest)
......
......@@ -170,8 +170,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = -1.0 / std::pow(alpha * v1 + a, 2.0);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(InvParamNodeTest, AttributesTest)
......
......@@ -214,8 +214,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 1.0 / (alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(LogParamNodeTest, AttributesTest)
......
......@@ -171,8 +171,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = v1;
EXPECT_EQ(_gradient[0], df_dp * v2);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v2), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(MultParamNodeTest, AttributesTest)
......
......@@ -212,8 +212,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = -1.0 * std::exp(-1.0 * alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(NegExpParamNodeTest, AttributesTest)
......
......@@ -198,8 +198,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = std::cos(alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(SinParamNodeTest, AttributesTest)
......
......@@ -169,8 +169,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 6.0 * std::pow(alpha * v1 + a, 5.0);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(SixPowParamNodeTest, AttributesTest)
......
......@@ -169,8 +169,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 2.0 * (alpha * v1 + a);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(SqParamNodeTest, AttributesTest)
......
......@@ -169,8 +169,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = 0.5 * std::pow(alpha * v1 + a, -0.5);
EXPECT_EQ(_gradient[0], df_dp * v1);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v1), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(SqrtParamNodeTest, AttributesTest)
......
......@@ -171,8 +171,8 @@ namespace
double a = copy_test->parameters()[1];
double df_dp = -1.0;
EXPECT_EQ(_gradient[0], df_dp * v2);
EXPECT_EQ(_gradient[_task_sizes_train[0]], df_dp);
EXPECT_LT(std::abs(_gradient[0] - df_dp * v2), 1e-10);
EXPECT_LT(std::abs(_gradient[_task_sizes_train[0]] - df_dp), 1e-10);
}
TEST_F(SubParamNodeTest, AttributesTest)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment