Commit 028ecb86 authored by Thomas Purcell's avatar Thomas Purcell
Browse files

Add Tests for inputs in GTest and pytest

parent e034183c
......@@ -35,9 +35,30 @@ FeatureNode::FeatureNode(const unsigned long int feat_ind, const std::string exp
{
if(set_val)
{
// Automatically resize the storage arrays
if(node_value_arrs::N_STORE_FEATURES == 0)
{
node_value_arrs::initialize_values_arr(_n_samp, _n_samp_test, 1);
}
else if((_n_samp != node_value_arrs::N_SAMPLES) || (_n_samp_test != node_value_arrs::N_SAMPLES_TEST))
{
throw std::logic_error(
"Number of samples in current feature is not the same as the others, (" +
std::to_string(_n_samp) +
" and " + std::to_string(_n_samp_test) +
" vs. " +
std::to_string(node_value_arrs::N_SAMPLES) +
" and " +
std::to_string(node_value_arrs::N_SAMPLES_TEST) +
")"
);
}
else if(feat_ind >= node_value_arrs::N_STORE_FEATURES)
{
node_value_arrs::resize_values_arr(0, node_value_arrs::N_STORE_FEATURES + 1);
}
set_value();
set_test_value();
}
}
......
......@@ -120,11 +120,16 @@ void node_value_arrs::set_max_rung(const int max_rung, bool use_params)
void node_value_arrs::set_task_sz_train(const std::vector<int> task_sz_train)
{
if(std::accumulate(task_sz_train.begin(), task_sz_train.end(), 0) != N_SAMPLES)
if((N_SAMPLES > 0) && (std::accumulate(task_sz_train.begin(), task_sz_train.end(), 0) != N_SAMPLES))
{
int n_samp_new = std::accumulate(task_sz_train.begin(), task_sz_train.end(), 0);
throw std::logic_error("The total number of samples has changed from " + std::to_string(N_SAMPLES) + " to " + std::to_string(n_samp_new) + ", task_sz_train is wrong.");
}
else if(N_SAMPLES == 0)
{
N_SAMPLES = std::accumulate(task_sz_train.begin(), task_sz_train.end(), 0);
}
TASK_SZ_TRAIN = task_sz_train;
TASK_START_TRAIN = std::vector<int>(TASK_SZ_TRAIN.size(), 0);
std::copy_n(TASK_SZ_TRAIN.begin(), TASK_SZ_TRAIN.size() - 1, &TASK_START_TRAIN[1]);
......@@ -132,11 +137,16 @@ void node_value_arrs::set_task_sz_train(const std::vector<int> task_sz_train)
/**
 * @brief Set the number of test samples per task
 *
 * If N_SAMPLES_TEST has not been set yet (== 0) it is initialized from the
 * sum of the task sizes; otherwise the sum must match the existing value.
 *
 * @param task_sz_test Number of test samples per task
 * @throws std::logic_error if the total disagrees with an already-set N_SAMPLES_TEST
 */
void node_value_arrs::set_task_sz_test(const std::vector<int> task_sz_test)
{
    // Hoist the sum so it is computed once (it was evaluated in both the
    // condition and the error message before)
    const int n_samp_new = std::accumulate(task_sz_test.begin(), task_sz_test.end(), 0);
    if((N_SAMPLES_TEST > 0) && (n_samp_new != N_SAMPLES_TEST))
    {
        throw std::logic_error("The total number of test samples has changed from " + std::to_string(N_SAMPLES_TEST) + " to " + std::to_string(n_samp_new) + ", task_sz_test is wrong.");
    }
    else if(N_SAMPLES_TEST == 0)
    {
        // First time the test task sizes are set: record the total
        N_SAMPLES_TEST = n_samp_new;
    }
    TASK_SZ_TEST = task_sz_test;
}
......@@ -234,3 +244,32 @@ void node_value_arrs::resize_d_matrix_arr(const int n_select)
D_MATRIX.resize(N_SELECTED * N_SAMPLES, 0.0);
D_MATRIX.shrink_to_fit();
}
// NOTE(review): the name is misspelled ("finialize" should be "finalize").
// It is declared in the header and exposed to Python as
// "finalize_values_arr", so renaming requires a coordinated change in the
// header and the bindings; left as-is here.
/**
 * @brief Reset every descriptive counter and resize all central storage
 *        arrays to zero, returning node_value_arrs to its pre-initialized
 *        state so a test (or a new calculation) can re-initialize it.
 */
void node_value_arrs::finialize_values_arr()
{
// Reset all bookkeeping counters to their "unset" defaults
N_SELECTED = 0;
N_SAMPLES = 0;
N_STORE_FEATURES = 0;
N_PRIMARY_FEATURES = 0;
N_RUNGS_STORED = 0;
N_SAMPLES_TEST = 0;
// Re-query OpenMP instead of zeroing: the thread count is environment
// dependent, not calculation dependent
MAX_N_THREADS = omp_get_max_threads();
N_OP_SLOTS = 0;
N_PARAM_OP_SLOTS = 0;
MAX_RUNG = 0;
// Empty every storage array (capacity may be retained; only size is reset)
TEMP_STORAGE_REG.resize(0);
TEMP_STORAGE_TEST_REG.resize(0);
TASK_SZ_TRAIN.resize(0);
TASK_START_TRAIN.resize(0);
TASK_SZ_TEST.resize(0);
PARAM_STORAGE_ARR.resize(0);
PARAM_STORAGE_TEST_ARR.resize(0);
D_MATRIX.resize(0);
VALUES_ARR.resize(0);
TEST_VALUES_ARR.resize(0);
TEMP_STORAGE_ARR.resize(0);
TEMP_STORAGE_TEST_ARR.resize(0);
}
......@@ -71,6 +71,13 @@ namespace node_value_arrs
extern int MAX_N_THREADS; //!< Get the maximum number of threads possible for a calculation
extern int N_OP_SLOTS; //!< The number of possible nodes of the binary expression tree that maybe calculated on the fly
extern int N_PARAM_OP_SLOTS; //!< The number of possible non-leaf nodes of the binary expression tree
// DocString: node_vals_finalize
/**
* @brief Resize all storage arrays to be empty
*/
void finialize_values_arr();
/**
* @brief Initialize all central storage vectors/descriptive variables without changing MAX_RUNG
*
......
......@@ -43,7 +43,7 @@ InputParser::InputParser() :
_n_samp_test(0),
_n_residual(1),
_n_models_store(1),
_max_param_depth(0),
_max_param_depth(-1),
_nlopt_seed(42),
_fix_intercept(false),
_global_param_opt(false),
......
......@@ -183,7 +183,7 @@ public:
*/
inline const std::vector<std::string>& sample_ids_test() const
{
if(_sample_ids_test.size() != _n_samp_test)
if((_n_samp_test != 0) && (_sample_ids_test.size() != _n_samp_test))
{
throw std::logic_error("Accessing an unset member (_sample_ids_test).");
}
......@@ -195,7 +195,7 @@ public:
*/
inline std::vector<std::string> sample_ids_test_copy() const
{
if(_sample_ids_test.size() != _n_samp_test)
if((_n_samp_test != 0) && (_sample_ids_test.size() != _n_samp_test))
{
throw std::logic_error("Accessing an unset member (_sample_ids_test).");
}
......
......@@ -102,8 +102,15 @@ void sisso::register_all()
def(
"initialize_d_matrix_arr",
&node_value_arrs::initialize_d_matrix_arr,
"@DocString_node_vlas_init_d_mat@"
"@DocString_node_vals_init_d_mat@"
);
def(
"finalize_values_arr",
&node_value_arrs::finialize_values_arr,
"@DocString_node_vals_finalize@"
);
def(
"matlabify",
&str_utils::matlabify,
......@@ -183,8 +190,8 @@ void sisso::registerInputs()
.add_property("allowed_ops", &InputParser::allowed_ops_py, &InputParser::set_allowed_ops_py, "@DocString_inputs_allowed_ops_py@")
.add_property("prop_train", &InputParser::prop_train_arr, &InputParser::set_prop_train_arr, "@DocString_inputs_prop_train_arr@")
.add_property("prop_test", &InputParser::prop_test_arr, &InputParser::set_prop_test_arr, "@DocString_inputs_prop_test_arr@")
// .add_property("prop_train", &InputParser::prop_train_list, &InputParser::set_prop_train_list, "@DocString_inputs_prop_train_list@")
// .add_property("prop_test", &InputParser::prop_test_list, &InputParser::set_prop_test_list, "@DocString_inputs_prop_test_list@")
.add_property("prop_train", &InputParser::prop_train_list, &InputParser::set_prop_train_list, "@DocString_inputs_prop_train_list@")
.add_property("prop_test", &InputParser::prop_test_list, &InputParser::set_prop_test_list, "@DocString_inputs_prop_test_list@")
.add_property("leave_out_inds", &InputParser::leave_out_inds_py, &InputParser::set_leave_out_inds_py, "@DocString_inputs_leave_out_inds_py@")
.add_property("task_sizes_train", &InputParser::task_sizes_train_py, &InputParser::set_task_sizes_train_py, "@DocString_inputs_task_sizes_train_py@")
.add_property("task_sizes_test", &InputParser::task_sizes_test_py, &InputParser::set_task_sizes_test_py, "@DocString_inputs_task_sizes_test_py@")
......
......@@ -186,10 +186,5 @@ namespace
sisso.models().back()[0].to_file("sisso_classifier_train_model.dat", true);
sisso.models().back()[0].to_file("sisso_classifier_test_model.dat", false);
for(auto& feat : sisso.feat_space()->phi_selected())
{
std::cout << feat->expr() << std::endl;
}
}
}
Sample,task,property (m),A (m)
a,task_1,1.0,1.0
b,task_1,4.0,2.0
c,task_2,9.0,3.0
d,task_1,16.0,4.0
// Copyright 2021 Thomas A. R. Purcell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <inputs/InputParser.hpp>
#include "gtest/gtest.h"
namespace
{
// Test fixture for InputParser: clears the global node_value_arrs storage
// before each test and provides the reference values (sample ids, task
// sizes, properties, options, ...) that both the default-constructor and
// file-constructor tests compare against. The values mirror the contents of
// googletest/inputs/sisso.json and googletest/inputs/data.csv.
class InputParserTests : public ::testing::Test
{
protected:
void SetUp() override
{
// Reset the globally stored feature-value arrays so state left behind by
// other tests cannot leak into this one
node_value_arrs::finialize_values_arr();
_sample_ids_train = {"a", "b", "c"};
_sample_ids_test = {"d"};
_task_names = {"task_1", "task_2"};
_allowed_param_ops = {"log"};
_allowed_ops = {"sq", "cb"};
_prop_train = {1.0, 4.0, 9.0};
_prop_test = {16.0};
_leave_out_inds = {3};
_task_sizes_train = {2, 1};
_task_sizes_test = {1, 0};
_phi_0 = {FeatureNode(0, "feat_1", {1.0, 2.0, 3.0}, {4.0}, Unit("m"))};
_prop_unit = Unit("m");
_filename = "googletest/inputs/sisso.json";
_data_file = "googletest/inputs/data.csv";
_prop_key = "property";
_prop_label = "property";
_task_key = "task";
_calc_type = "regression";
_cross_cor_max = 1.0;
_l_bound = 1e-5;
_u_bound = 1e8;
_n_dim = 2;
_max_rung = 1;
_n_rung_store = 1;
_n_rung_generate = 0;
_n_sis_select = 1;
_n_residual = 1;
_n_models_store = 1;
_max_param_depth = 1;
_nlopt_seed = 10;
_fix_intercept = false;
_global_param_opt = true;
_reparam_residual = true;
}
std::vector<std::string> _sample_ids_train; //!< Vector storing all sample ids for the training samples
std::vector<std::string> _sample_ids_test; //!< Vector storing all sample ids for the test samples
std::vector<std::string> _task_names; //!< Vector storing the ID of the task names
std::vector<std::string> _allowed_param_ops; //!< Vector containing all allowed operators strings for operators with free parameters
std::vector<std::string> _allowed_ops; //!< Vector containing all allowed operators strings
std::vector<double> _prop_train; //!< The value of the property to evaluate the loss function against for the training set
std::vector<double> _prop_test; //!< The value of the property to evaluate the loss function against for the test set
std::vector<int> _leave_out_inds; //!< List of indexes from the initial data file in the test set
std::vector<int> _task_sizes_train; //!< Number of training samples per task
std::vector<int> _task_sizes_test; //!< Number of testing samples per task
std::vector<FeatureNode> _phi_0; //!< A vector of FeatureNodes for the primary feature space
Unit _prop_unit; //!< The Unit of the property
std::string _filename; //!< Name of the input file
std::string _data_file; //!< Name of the data file
std::string _prop_key; //!< Key used to find the property column in the data file
std::string _prop_label; //!< The label of the property
std::string _task_key; //!< Key used to find the task column in the data file
std::string _calc_type; //!< The type of LossFunction to use when projecting the features onto a property
std::shared_ptr<MPI_Interface> _mpi_comm; //!< The MPI communicator for the calculation
double _cross_cor_max; //!< Maximum cross-correlation used for selecting features
double _l_bound; //!< The lower bound for the maximum absolute value of the features
double _u_bound; //!< The upper bound for the maximum absolute value of the features
int _n_dim; //!< The maximum number of features allowed in the linear model
int _max_rung; //!< Maximum rung for the feature creation
int _n_rung_store; //!< The number of rungs to calculate and store the value of the features for all samples
int _n_rung_generate; //!< Either 0 or 1, and is the number of rungs to generate on the fly during SIS
int _n_sis_select; //!< Number of features to select during each SIS iteration
int _n_samp; //!< Number of samples in the data set
int _n_samp_train; //!< Number of samples in the training set
int _n_samp_test; //!< Number of samples in the test set
int _n_residual; //!< Number of residuals to pass to the next sis model
int _n_models_store; //!< The number of models to output to files
int _max_param_depth; //!< The maximum depth in the binary expression tree to set non-linear optimization
int _nlopt_seed; //!< The seed used for the nlOpt library
bool _fix_intercept; //!< If true the bias term is fixed at 0
bool _global_param_opt; //!< True if global optimization is requested for non-linear optimization of parameters (Can break reproducibility)
bool _reparam_residual; //!< If True then reparameterize features using the residuals of each model
};
// Verifies that every setter/getter pair on a default-constructed InputParser
// round-trips the value assigned in SetUp, and that the derived sample counts
// (n_samp, n_samp_train, n_samp_test) follow from the task sizes.
// Fixed test-name typo: "DefaultConsructor" -> "DefaultConstructor".
TEST_F(InputParserTests, DefaultConstructor)
{
    InputParser inputs;

    // Task sizes must be set first: they define N_SAMPLES / N_SAMPLES_TEST
    inputs.set_task_sizes_train(_task_sizes_train);
    EXPECT_EQ(inputs.task_sizes_train()[0], _task_sizes_train[0]);

    inputs.set_task_sizes_test(_task_sizes_test);
    EXPECT_EQ(inputs.task_sizes_test()[0], _task_sizes_test[0]);

    inputs.set_sample_ids_train(_sample_ids_train);
    EXPECT_EQ(inputs.sample_ids_train()[0], _sample_ids_train[0]);

    inputs.set_sample_ids_test(_sample_ids_test);
    EXPECT_EQ(inputs.sample_ids_test()[0], _sample_ids_test[0]);

    inputs.set_task_names(_task_names);
    EXPECT_EQ(inputs.task_names()[0], _task_names[0]);

    inputs.set_allowed_param_ops(_allowed_param_ops);
    EXPECT_EQ(inputs.allowed_param_ops()[0], _allowed_param_ops[0]);

    inputs.set_allowed_ops(_allowed_ops);
    EXPECT_EQ(inputs.allowed_ops()[0], _allowed_ops[0]);

    inputs.set_prop_train(_prop_train);
    EXPECT_EQ(inputs.prop_train()[0], _prop_train[0]);

    inputs.set_prop_test(_prop_test);
    EXPECT_EQ(inputs.prop_test()[0], _prop_test[0]);

    inputs.set_leave_out_inds(_leave_out_inds);
    EXPECT_EQ(inputs.leave_out_inds()[0], _leave_out_inds[0]);

    // Derived counts: 3 training + 1 test sample = 4 total
    EXPECT_EQ(inputs.n_samp(), 4);
    EXPECT_EQ(inputs.n_samp_test(), 1);
    EXPECT_EQ(inputs.n_samp_train(), 3);

    inputs.set_phi_0(_phi_0);
    EXPECT_EQ(inputs.phi_0()[0].feat_ind(), _phi_0[0].feat_ind());
    EXPECT_EQ(inputs.phi_0_ptrs()[0]->feat_ind(), _phi_0[0].feat_ind());

    inputs.set_prop_unit(_prop_unit);
    EXPECT_EQ(inputs.prop_unit(), _prop_unit);

    inputs.set_filename(_filename);
    EXPECT_EQ(inputs.filename(), _filename);

    inputs.set_data_file(_data_file);
    EXPECT_EQ(inputs.data_file(), _data_file);

    inputs.set_prop_key(_prop_key);
    EXPECT_EQ(inputs.prop_key(), _prop_key);

    inputs.set_prop_label(_prop_label);
    EXPECT_EQ(inputs.prop_label(), _prop_label);

    inputs.set_task_key(_task_key);
    EXPECT_EQ(inputs.task_key(), _task_key);

    inputs.set_calc_type(_calc_type);
    EXPECT_EQ(inputs.calc_type(), _calc_type);

    inputs.set_cross_cor_max(_cross_cor_max);
    EXPECT_EQ(inputs.cross_cor_max(), _cross_cor_max);

    inputs.set_l_bound(_l_bound);
    EXPECT_EQ(inputs.l_bound(), _l_bound);

    inputs.set_u_bound(_u_bound);
    EXPECT_EQ(inputs.u_bound(), _u_bound);

    inputs.set_n_dim(_n_dim);
    EXPECT_EQ(inputs.n_dim(), _n_dim);

    inputs.set_max_rung(_max_rung);
    EXPECT_EQ(inputs.max_rung(), _max_rung);

    inputs.set_n_rung_store(_n_rung_store);
    EXPECT_EQ(inputs.n_rung_store(), _n_rung_store);

    inputs.set_n_rung_generate(_n_rung_generate);
    EXPECT_EQ(inputs.n_rung_generate(), _n_rung_generate);

    inputs.set_n_sis_select(_n_sis_select);
    EXPECT_EQ(inputs.n_sis_select(), _n_sis_select);

    inputs.set_n_residual(_n_residual);
    EXPECT_EQ(inputs.n_residual(), _n_residual);

    inputs.set_n_models_store(_n_models_store);
    EXPECT_EQ(inputs.n_models_store(), _n_models_store);

    inputs.set_max_param_depth(_max_param_depth);
    EXPECT_EQ(inputs.max_param_depth(), _max_param_depth);

    inputs.set_nlopt_seed(_nlopt_seed);
    EXPECT_EQ(inputs.nlopt_seed(), _nlopt_seed);

    inputs.set_fix_intercept(_fix_intercept);
    EXPECT_EQ(inputs.fix_intercept(), _fix_intercept);

    inputs.set_global_param_opt(_global_param_opt);
    EXPECT_EQ(inputs.global_param_opt(), _global_param_opt);

    inputs.set_reparam_residual(_reparam_residual);
    EXPECT_EQ(inputs.reparam_residual(), _reparam_residual);
}
// Verifies that an InputParser built from the JSON input file and data file
// yields the same values as the fixture's reference data (i.e. file parsing
// agrees with the setter-based construction).
// Fixed test-name typo: "FileConsructor" -> "FileConstructor".
TEST_F(InputParserTests, FileConstructor)
{
    // Parse the JSON input file into a property tree and build the inputs
    boost::property_tree::ptree propTree;
    boost::property_tree::json_parser::read_json(_filename, propTree);
    InputParser inputs(propTree, _filename, mpi_setup::comm);

    EXPECT_EQ(inputs.sample_ids_train()[0], _sample_ids_train[0]);
    EXPECT_EQ(inputs.sample_ids_test()[0], _sample_ids_test[0]);
    EXPECT_EQ(inputs.task_names()[0], _task_names[0]);
    EXPECT_EQ(inputs.allowed_param_ops()[0], _allowed_param_ops[0]);
    EXPECT_EQ(inputs.allowed_ops()[0], _allowed_ops[0]);
    EXPECT_EQ(inputs.prop_train()[0], _prop_train[0]);
    EXPECT_EQ(inputs.prop_test()[0], _prop_test[0]);
    EXPECT_EQ(inputs.leave_out_inds()[0], _leave_out_inds[0]);
    EXPECT_EQ(inputs.task_sizes_train()[0], _task_sizes_train[0]);
    EXPECT_EQ(inputs.task_sizes_test()[0], _task_sizes_test[0]);

    // Derived counts: 3 training + 1 test sample = 4 total
    EXPECT_EQ(inputs.n_samp(), 4);
    EXPECT_EQ(inputs.n_samp_test(), 1);
    EXPECT_EQ(inputs.n_samp_train(), 3);

    EXPECT_EQ(inputs.phi_0()[0].feat_ind(), _phi_0[0].feat_ind());
    EXPECT_EQ(inputs.phi_0_ptrs()[0]->feat_ind(), _phi_0[0].feat_ind());
    EXPECT_EQ(inputs.prop_unit(), _prop_unit);
    EXPECT_EQ(inputs.filename(), _filename);
    EXPECT_EQ(inputs.data_file(), _data_file);
    EXPECT_EQ(inputs.prop_key(), _prop_key);
    EXPECT_EQ(inputs.prop_label(), _prop_label);
    EXPECT_EQ(inputs.task_key(), _task_key);
    EXPECT_EQ(inputs.calc_type(), _calc_type);
    EXPECT_EQ(inputs.cross_cor_max(), _cross_cor_max);
    EXPECT_EQ(inputs.l_bound(), _l_bound);
    EXPECT_EQ(inputs.u_bound(), _u_bound);
    EXPECT_EQ(inputs.n_dim(), _n_dim);
    EXPECT_EQ(inputs.max_rung(), _max_rung);
    EXPECT_EQ(inputs.n_rung_store(), _n_rung_store);
    EXPECT_EQ(inputs.n_rung_generate(), _n_rung_generate);
    EXPECT_EQ(inputs.n_sis_select(), _n_sis_select);
    EXPECT_EQ(inputs.n_residual(), _n_residual);
    EXPECT_EQ(inputs.n_models_store(), _n_models_store);
    EXPECT_EQ(inputs.max_param_depth(), _max_param_depth);
    EXPECT_EQ(inputs.nlopt_seed(), _nlopt_seed);
    EXPECT_EQ(inputs.fix_intercept(), _fix_intercept);
    EXPECT_EQ(inputs.global_param_opt(), _global_param_opt);
    EXPECT_EQ(inputs.reparam_residual(), _reparam_residual);
}
}
{
"desc_dim": 2,
"n_sis_select": 1,
"max_rung": 1,
"n_residual": 1,
"n_models_store": 1,
"n_rung_store": 1,
"data_file": "googletest/inputs/data.csv",
"property_key": "property",
"task_key": "task",
"leave_out_inds": [3],
"opset": ["sq", "cb"],
"param_opset": ["log"],
"fix_intercept": false,
"min_abs_feat_val": 1e-5,
"max_abs_feat_val": 1e8,
"max_param_depth": 1,
"nlopt_seed": 10,
"global_param_opt": true,
"reparam_residual": true
}
# Copyright 2021 Thomas A. R. Purcell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import numpy as np
from sissopp import (
FeatureNode,
Inputs,
Unit,
finalize_values_arr,
)
import matplotlib.pyplot as plt
def test_inputs():
finalize_values_arr()
inputs = Inputs()
sample_ids_train = ["a", "b", "c"]
inputs.sample_ids_train = sample_ids_train
assert inputs.sample_ids_train == sample_ids_train
sample_ids_test = ["d"]
inputs.sample_ids_test = sample_ids_test
assert inputs.sample_ids_test == sample_ids_test
task_sizes_train = [2, 1]
inputs.task_sizes_train = task_sizes_train
assert inputs.task_sizes_train == task_sizes_train
task_sizes_test = [1, 0]
inputs.task_sizes_test = task_sizes_test
assert inputs.task_sizes_test == task_sizes_test
task_names = ["task_1", "task_2"]
inputs.task_names = task_names
assert inputs.task_names == task_names
allowed_param_ops = ["log"]
inputs.allowed_param_ops = allowed_param_ops
assert inputs.allowed_param_ops == allowed_param_ops
allowed_ops = ["sq", "cb"]
inputs.allowed_ops = allowed_ops
assert inputs.allowed_ops == allowed_ops
prop_train = np.array([1.0, 4.0, 9.0])
inputs.prop_train = prop_train
assert np.all(inputs.prop_train == prop_train)
prop_test = np.array([16.0])
inputs.prop_test = prop_test
assert np.all(inputs.prop_test == prop_test)
leave_out_inds = [3]
inputs.leave_out_inds = leave_out_inds
assert inputs.leave_out_inds == leave_out_inds
phi_0 = [FeatureNode(0, "feat_1", [1.0, 2.0, 3.0], [4.0], Unit("m"))]
inputs.phi_0 = phi_0
assert inputs.phi_0[0].expr == phi_0[0].expr
prop_unit = Unit("m")
inputs.prop_unit = prop_unit
assert inputs.prop_unit == prop_unit
filename = "googletest/inputs/sisso.json"
inputs.filename = filename
assert inputs.filename == filename
data_file = "googletest/inputs/data.csv"
inputs.data_file = data_file
assert inputs.data_file == data_file
prop_key = "property"
inputs.prop_key = prop_key
assert inputs.prop_key == prop_key
prop_label = "property"
inputs.prop_label = prop_label
assert inputs.prop_label == prop_label
task_key = "task"
inputs.task_key = task_key
assert inputs.task_key == task_key
calc_type = "regression"
inputs.calc_type = calc_type
assert inputs.calc_type == calc_type
cross_cor_max = 1.0
inputs.cross_cor_max = cross_cor_max
assert inputs.cross_cor_max == cross_cor_max
l_bound = 1e-5
inputs.l_bound = l_bound
assert inputs.l_bound == l_bound
u_bound = 1e8
inputs.u_bound = u_bound
assert inputs.u_bound == u_bound
n_dim = 2