Commit c4673617 authored by Thomas Purcell

Update nlopt_wrapper

Remove the threadprivate vectors that caused the valgrind errors.
It may not have been an actual leak, but don't risk it.
parent 32370f4a
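The change replaces OpenMP threadprivate globals with objects that each thread constructs locally inside its parallel region, so nothing outlives the region for valgrind to flag. A minimal sketch of the pattern, with illustrative names (Scratch, process) that are not the project's API:

#include <memory>
#include <vector>

struct Scratch { std::vector<double> buf; };  // stand-in for the per-thread NLOpt state

// Before: a file-scope vector marked "#pragma omp threadprivate" keeps one copy
// alive per thread until program exit, which valgrind reports as still reachable.

void process(const std::vector<double>& data)
{
    #pragma omp parallel default(shared)
    {
        // After: one owner per thread, destroyed when the parallel region ends.
        auto scratch = std::make_shared<Scratch>();
        #pragma omp for schedule(dynamic)
        for(int ii = 0; ii < static_cast<int>(data.size()); ++ii)
            scratch->buf.push_back(data[ii]);
    }
}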
@@ -65,6 +65,7 @@ FeatureSpace::FeatureSpace(
_start_gen(1, 0),
_feature_space_file("feature_space/selected_features.txt"),
_feature_space_summary_file("feature_space/SIS_summary.txt"),
_project_type(project_type),
_mpi_comm(mpi_comm),
_cross_cor_max(cross_corr_max),
_l_bound(min_abs_feat_val),
@@ -77,10 +78,10 @@ FeatureSpace::FeatureSpace(
_n_rung_generate(n_rung_generate),
_max_param_depth(max_param_depth)
{
initialize_fs(project_type);
initialize_fs();
}
void FeatureSpace::initialize_fs(std::string project_type)
void FeatureSpace::initialize_fs()
{
#ifndef PARAMETERIZE
if(_allowed_param_ops.size() != 0)
@@ -90,7 +91,7 @@ void FeatureSpace::initialize_fs(std::string project_type)
_max_param_depth = _max_phi;
if((_max_param_depth < 0) || (_max_param_depth > _max_phi))
throw std::logic_error("Invalid parameter depth.");
nlopt_wrapper::set_objective(project_type, _prop.data(), _task_sizes, _max_phi, _max_param_depth);
nlopt_wrapper::MAX_PARAM_DEPTH = _max_param_depth;
#endif
if(_n_rung_store == -1)
@@ -112,17 +113,17 @@ void FeatureSpace::initialize_fs(std::string project_type)
sum_file_stream.close();
}
if(project_type.compare("regression") == 0)
if(_project_type.compare("regression") == 0)
{
_project = project_funcs::project_r2;
_project_no_omp = project_funcs::project_r2_no_omp;
}
else if(project_type.compare("classification") == 0)
else if(_project_type.compare("classification") == 0)
{
_project = project_funcs::project_classify;
_project_no_omp = project_funcs::project_classify_no_omp;
}
else if(project_type.compare("log_regression") == 0)
else if(_project_type.compare("log_regression") == 0)
{
if(_task_sizes.size() > 1)
throw std::logic_error("Log Regression can not be done using multiple tasks.");
@@ -183,47 +184,73 @@ void FeatureSpace::initialize_fs(std::string project_type)
_scores.resize(_phi.size());
}
void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound, double u_bound)
{
int phi_ind = feat - _phi.begin();
feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
for(auto& op : _un_operators)
#ifdef PARAMETERIZE
void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, std::shared_ptr<NLOptimizer> optimizer, double l_bound, double u_bound)
{
op(feat_set, *feat, feat_ind, l_bound, u_bound);
}
int phi_ind = feat - _phi.begin();
feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
for(auto& op : _com_bin_operators)
{
for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
}
for(auto& op : _un_operators)
{
op(feat_set, *feat, feat_ind, l_bound, u_bound);
}
for(auto& op : _bin_operators)
{
for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
for(auto& op : _com_bin_operators)
{
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
}
for(auto& op : _bin_operators)
{
for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
{
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
}
}
}
#ifdef PARAMETERIZE
for(auto& op : _un_param_operators)
op(feat_set, *feat, feat_ind, l_bound, u_bound, _prop);
op(feat_set, *feat, feat_ind, l_bound, u_bound, optimizer);
for(auto& op : _com_bin_param_operators)
for(auto feat_2 = _phi.begin(); feat_2 != feat; ++feat_2)
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, _prop);
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, optimizer);
for(auto& op : _bin_param_operators)
{
for(auto feat_2 = _phi.begin(); feat_2 != feat; ++feat_2)
{
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, _prop);
op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound, _prop);
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound, optimizer);
op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound, optimizer);
}
}
#endif
}
}
#else
void FeatureSpace::generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound, double u_bound)
{
int phi_ind = feat - _phi.begin();
feat_set.reserve(feat_set.size() + _un_operators.size() + phi_ind * (_com_bin_operators.size() + 2 * _bin_operators.size()));
for(auto& op : _un_operators)
{
op(feat_set, *feat, feat_ind, l_bound, u_bound);
}
for(auto& op : _com_bin_operators)
{
for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
}
for(auto& op : _bin_operators)
{
for(auto feat_2 = _phi.begin(); feat_2 < feat; ++feat_2)
{
op(feat_set, *feat, *feat_2, feat_ind, l_bound, u_bound);
op(feat_set, *feat_2, *feat, feat_ind, l_bound, u_bound);
}
}
}
#endif
void FeatureSpace::generate_feature_space()
{
@@ -245,16 +272,31 @@ void FeatureSpace::generate_feature_space()
int feat_ind = _phi.size();
node_value_arrs::clear_temp_reg();
double start = omp_get_wtime();
#pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
{
std::vector<node_ptr> next_phi_private;
#pragma omp for schedule(dynamic)
for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
generate_new_feats(feat_1, next_phi_private, feat_ind, l_bound, u_bound);
#ifdef PARAMETERIZE
#pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
{
std::vector<node_ptr> next_phi_private;
std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
#pragma omp critical
next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
}
#pragma omp for schedule(dynamic)
for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
generate_new_feats(feat_1, next_phi_private, feat_ind, optimizer, l_bound, u_bound);
#pragma omp critical
next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
}
#else
#pragma omp parallel firstprivate(feat_ind, l_bound, u_bound) default(shared)
{
std::vector<node_ptr> next_phi_private;
#pragma omp for schedule(dynamic)
for(auto feat_1 = _phi.begin() + _start_gen.back() + _mpi_comm->rank(); feat_1 < _phi.end(); feat_1 += _mpi_comm->size())
generate_new_feats(feat_1, next_phi_private, feat_ind, l_bound, u_bound);
#pragma omp critical
next_phi.insert(next_phi.end(), next_phi_private.begin(), next_phi_private.end());
}
#endif
_start_gen.push_back(_phi.size());
node_value_arrs::clear_temp_reg();
if((nn < _max_phi) || (nn <= _n_rung_store) || (_mpi_comm->size() == 1))
@@ -523,6 +565,10 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
std::vector<node_ptr> phi_sel_private(phi_sel);
std::vector<double> scores_sel_private(scores_sel);
#ifdef PARAMETERIZE
std::shared_ptr<NLOptimizer> optimizer = nlopt_wrapper::get_optimizer(_project_type, _task_sizes, _prop, _max_phi, _max_param_depth);
#endif
int feat_ind = _phi.size();
#pragma omp for schedule(dynamic)
@@ -533,7 +579,11 @@ void FeatureSpace::project_generated(double* prop, int size, std::vector<node_pt
bool is_sel = (*feat)->selected();
(*feat)->set_selected(false);
generate_new_feats(feat, generated_phi, feat_ind, _l_bound, _u_bound);
#ifdef PARAMETERIZE
generate_new_feats(feat, generated_phi, feat_ind, optimizer, _l_bound, _u_bound);
#else
generate_new_feats(feat, generated_phi, feat_ind, _l_bound, _u_bound);
#endif
(*feat)->set_selected(is_sel);
if(generated_phi.size() == 0)
......
@@ -61,6 +61,7 @@ class FeatureSpace
const std::vector<int> _task_sizes; //!< The number of elements in each task (training data)
std::vector<int> _start_gen; //!< list of the indexes where each generation starts in _phi
const std::string _project_type; //!< The type of projection that should be done during SIS
const std::string _feature_space_file; //!< File to store information about the selected features
const std::string _feature_space_summary_file; //!< File to store information about the selected features
@@ -126,10 +127,8 @@ public:
/**
* @brief Initialize the feature set given a property vector
*
* @param prop The property trying to be learned
*/
void initialize_fs(std::string project_type);
void initialize_fs();
/**
* @brief Generate the full feature set from the allowed operators and initial feature set
@@ -221,18 +220,45 @@ public:
*/
inline int n_rung_generate(){return _n_rung_generate;}
/**
* @brief Generate a new set of features from a single feature
* @details Take in the feature and perform all valid algebraic operations on it.
*
* @param feat The feature to spawn new features from
* @param feat_set The feature set to pull features from for combinations
* @param feat_ind starting index for the next feature generated
* @param l_bound lower bound for the absolute value of the feature
* @param u_bound upper bound for the absolute value of the feature
*/
void generate_new_feats(std::vector<node_ptr>::iterator& feat, std::vector<node_ptr>& feat_set, int& feat_ind, double l_bound=1e-50, double u_bound=1e50);
#ifdef PARAMETERIZE
/**
* @brief Generate a new set of features from a single feature
* @details Take in the feature and perform all valid algebraic operations on it.
*
* @param feat The feature to spawn new features from
* @param feat_set The feature set to pull features from for combinations
* @param feat_ind starting index for the next feature generated
* @param optimizer The object used to optimize the parameterized features
* @param l_bound lower bound for the absolute value of the feature
* @param u_bound upper bound for the absolute value of the feature
*/
void generate_new_feats(
std::vector<node_ptr>::iterator& feat,
std::vector<node_ptr>& feat_set,
int& feat_ind,
std::shared_ptr<NLOptimizer> optimizer,
double l_bound=1e-50,
double u_bound=1e50
);
#else
/**
* @brief Generate a new set of features from a single feature
* @details Take in the feature and perform all valid algebraic operations on it.
*
* @param feat The feature to spawn new features from
* @param feat_set The feature set to pull features from for combinations
* @param feat_ind starting index for the next feature generated
* @param l_bound lower bound for the absolute value of the feature
* @param u_bound upper bound for the absolute value of the feature
*/
void generate_new_feats(
std::vector<node_ptr>::iterator& feat,
std::vector<node_ptr>& feat_set,
int& feat_ind,
double l_bound=1e-50,
double u_bound=1e50
);
#endif
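A hedged usage sketch for these two overloads, as it might appear inside another FeatureSpace member; the locals (phi, prop, task_sizes, max_phi, max_param_depth) and the literal "regression" are illustrative assumptions, not code from this commit:

// Hypothetical call site inside a FeatureSpace member function.
std::vector<node_ptr> next_phi;                 // receives the generated features
int feat_ind = phi.size();                      // next free feature index
auto feat = phi.begin();                        // feature to expand
#ifdef PARAMETERIZE
std::shared_ptr<NLOptimizer> optimizer =
    nlopt_wrapper::get_optimizer("regression", task_sizes, prop, max_phi, max_param_depth);
generate_new_feats(feat, next_phi, feat_ind, optimizer, 1e-50, 1e50);
#else
generate_new_feats(feat, next_phi, feat_ind, 1e-50, 1e50);
#endif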
/**
* @brief Calculate the SIS Scores for feature generated on the fly
* @details Create the next rung of features and calculate their projection scores. Only keep those that can be selected by SIS.
......
@@ -337,13 +337,14 @@ public:
*/
virtual std::vector<double> parameters() = 0;
//DocString: op_node_get_params
/**
* @brief Solve the non-linear optimization to set the parameters
* @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
*
* @param optimizer The optimizer used to find the optimal parameters of the feature
*/
virtual void get_parameters(std::vector<double>& prop) = 0;
virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer) = 0;
/**
* @brief Set the non-linear parameters
@@ -357,7 +358,7 @@ public:
*/
virtual inline int n_params(int n_cur = 0, int depth = 1)
{
return (depth > nlopt_wrapper::_max_param_depth) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
return (depth > nlopt_wrapper::MAX_PARAM_DEPTH) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
}
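For orientation, the count above charges two fitted parameters (a scale and a shift, matching the {:.10e}*{}{:+15.10e} format strings further down) to each parameterized node and stops descending once depth exceeds nlopt_wrapper::MAX_PARAM_DEPTH. A sketch of the arithmetic, assuming unary features only:

// n_params(depth) = 0                                            if depth > MAX_PARAM_DEPTH
// n_params(depth) = 2 + sum over children of n_params(depth + 1) otherwise
// e.g. abs(abs(x)) with MAX_PARAM_DEPTH = 2 counts 2 (outer) + 2 (inner) = 4.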
/**
@@ -488,33 +489,6 @@ public:
}
#ifdef PY_BINDINGS
// DocString: op_node_param_arr
/**
* @brief Solve the non-linear optimization to set the parameters
* @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
*
* @param prop property to fit to get the parameters
* @param param_list List describing the parameters to fit
*/
inline void get_parameters(np::ndarray prop)
{
std::vector<double> prop_vec = python_conv_utils::from_ndarray<double>(prop);
get_parameters(prop_vec);
}
// DocString: op_node_param_list
/**
* @brief Solve the non-linear optimization to set the parameters
* @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
*
* @param prop property to fit to get the parameters
* @param param_list List describing the parameters to fit
*/
inline void get_parameters(py::list prop)
{
std::vector<double> prop_vec = python_conv_utils::from_list<double>(prop);
get_parameters(prop_vec);
}
// DocString: op_node_set_param_list
/**
......
@@ -156,7 +156,8 @@ public:
*
* @param optimizer The optimizer used to find the optimal parameters of the feature
*/
virtual void get_parameters(std::vector<double>& prop){return;}
virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
/**
* @brief Set the non-linear parameters
@@ -191,7 +192,7 @@ public:
return fmt::format(
"|{:.10e}*{}{:+15.10e}|",
params[0],
(depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
(depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
params[1]
);
}
@@ -209,7 +210,7 @@ public:
return fmt::format(
"\\left(\\left|{:.3e}{}{:+8.3e}\\right|\\right)",
params[0],
(depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
(depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
params[1]
);
}
......
@@ -2,10 +2,10 @@
BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsParamNode)
void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop)
void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
{
++feat_ind;
node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, prop);
node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, optimizer);
new_feat->set_value();
if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
@@ -17,40 +17,40 @@ void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int&
AbsParamNode::AbsParamNode()
{}
AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop) :
AbsNode(feat, feat_ind)
AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
AbsNode(feat, feat_ind),
_sign_alpha(1.0)
{
_params.resize(n_params(), 0.0);
get_parameters(prop);
get_parameters(optimizer);
if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
throw InvalidFeatureException();
}
AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop) :
AbsNode(feat, feat_ind)
AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
AbsNode(feat, feat_ind),
_sign_alpha(1.0)
{
_params.resize(n_params(), 0.0);
get_parameters(prop);
get_parameters(optimizer);
}
AbsParamNode::AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound) :
AbsNode(feat, feat_ind)
AbsNode(feat, feat_ind),
_sign_alpha(1.0)
{
_params.resize(n_params(), 0.0);
}
void AbsParamNode::get_parameters(std::vector<double>& prop)
void AbsParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
{
nlopt_wrapper::feat_data d;
d._feat = this;
d._prop = prop.data();
double min_res = nlopt_wrapper::optimize_feature_params(d);
_sign_alpha = 1.0;
double min_res = optimizer->optimize_feature_params(this);
std::vector<double> param_cp(_params);
_sign_alpha = -1.0;
if(nlopt_wrapper::optimize_feature_params(d) > min_res)
if(optimizer->optimize_feature_params(this) > min_res)
{
std::copy_n(param_cp.data(), param_cp.size(), _params.data());
_sign_alpha = 1.0;
@@ -61,7 +61,7 @@ void AbsNode::set_value(const double* params, int offset, int depth)
{
bool is_root = (offset == -1);
offset = (offset == -1) ? rung() : offset;
double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->value_ptr(offset + 2);
if(_selected && is_root)
allowed_op_funcs::abs(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_d_matrix_ptr(_d_mat_ind));
@@ -71,7 +71,7 @@ void AbsNode::set_value(const double* params, int offset, int depth)
void AbsNode::set_test_value(const double* params, int offset, int depth)
{
offset = (offset == -1) ? rung() : offset;
double* vp_0 = (depth < nlopt_wrapper::_max_param_depth) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, offset + 2, depth + 1) : _feats[0]->test_value_ptr(offset + 2);
allowed_op_funcs::abs(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, offset, false));
}
@@ -81,9 +81,8 @@ void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
lb[0] = 1.0;
ub[0] = 1.0;
if(depth >= nlopt_wrapper::_max_param_depth)
if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
return;
_feats[0]->set_bounds(lb + 2, ub + 2);
}
@@ -92,7 +91,7 @@ void AbsParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth
lb[0] = _sign_alpha;
ub[0] = _sign_alpha;
if(depth >= nlopt_wrapper::_max_param_depth)
if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
return;
_feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
......
@@ -63,7 +63,7 @@ public:
* @param optimizer The optimizer used to find the optimal parameters of the feature
*/
AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
AbsParamNode(node_ptr feat, int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
/**
* @brief Constructor
@@ -73,7 +73,7 @@ public:
* @param feat_ind Index of the new feature
* @param optimizer The optimizer used to find the optimal parameters of the feature
*/
AbsParamNode(node_ptr feat, int feat_ind, std::vector<double>& prop);
AbsParamNode(node_ptr feat, int feat_ind, std::shared_ptr<NLOptimizer> optimizer);
/**
* @brief Constructor
@@ -146,7 +146,8 @@ public:
*
* @param optimizer The optimizer used to find the optimal parameters of the feature
*/
void get_parameters(std::vector<double>& prop);
void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
/**
* @brief Set the non-linear parameters
@@ -177,7 +178,6 @@ public:
*/
void update_postfix(std::string& cur_expr, bool add_params=true)
{
std::stringstream postfix;
postfix << get_postfix_term();
if(add_params)
@@ -191,6 +191,6 @@ public:
}
};
void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::vector<double>& prop);
void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
#endif
@@ -167,7 +167,8 @@ public:
*
* @param optimizer The optimizer used to find the optimal parameters of the feature
*/
virtual void get_parameters(std::vector<double>& prop){return;}
virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
/**
* @brief Set the non-linear parameters
@@ -201,9 +202,9 @@ public:
{
return fmt::format(
"|{} - ({:.10e}*{}{:+15.10e})|",
(depth < nlopt_wrapper::_max_param_depth ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
(depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
params[0],
(depth < nlopt_wrapper::_max_param_depth ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
(depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
params[1]
);
}
@@ -220,9 +221,9 @@ public:
{
return fmt::format(
"\\left(\\left|{} - \\left({:.3e}{}{:+8.3e}\\right)\\right|\\right)",
(depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
(depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
params[0],
(depth < nlopt_wrapper::_max_param_depth ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
(depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
params[1]
);
}
......