diff --git a/src/feature_creation/node/FeatureNode.cpp b/src/feature_creation/node/FeatureNode.cpp
index 1d3b293bf003fb8413456e2a1f1ab84992f23e0c..b888799116a4f2a20f3db21786826323f303a1cd 100644
--- a/src/feature_creation/node/FeatureNode.cpp
+++ b/src/feature_creation/node/FeatureNode.cpp
@@ -39,9 +39,13 @@ bool FeatureNode::is_const()
 void FeatureNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     if(add_sub_leaves.count(_expr) > 0)
+    {
         add_sub_leaves[_expr] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[_expr] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -49,9 +53,13 @@ void FeatureNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leav
 void FeatureNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
 {
     if(div_mult_leaves.count(_expr) > 0)
+    {
         div_mult_leaves[_expr] += fact;
+    }
     else
+    {
         div_mult_leaves[_expr] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -66,9 +74,13 @@ std::map<int, int> FeatureNode::primary_feature_decomp()
 void FeatureNode::update_primary_feature_decomp(std::map<int, int>& pf_decomp)
 {
     if(pf_decomp.count(_arr_ind) > 0)
+    {
         pf_decomp[_arr_ind] += 1;
+    }
     else
+    {
         pf_decomp[_arr_ind] = 1;
+    }
 }
 
 // BOOST_CLASS_EXPORT(FeatureNode)
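
An aside on the updater pattern above: since `std::map::operator[]` value-initializes a missing key (to 0 for `int`, 0.0 for `double`), each count()/else pair in these functions could collapse to a single `+=`. A minimal standalone sketch, using a hypothetical leaf expression "x":

    #include <cassert>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, int> add_sub_leaves;
        add_sub_leaves["x"] += 2;  // key absent: value-initialized to 0, then += 2
        add_sub_leaves["x"] += 3;  // key present: accumulates as before
        assert(add_sub_leaves["x"] == 5);
    }
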
diff --git a/src/feature_creation/node/FeatureNode.hpp b/src/feature_creation/node/FeatureNode.hpp
index aad80475ed702bfcc19693a88ba7e493dfc1561a..46b15d572b68be2046a509291351169a84a37c10 100644
--- a/src/feature_creation/node/FeatureNode.hpp
+++ b/src/feature_creation/node/FeatureNode.hpp
@@ -21,8 +21,8 @@
 #include <feature_creation/node/Node.hpp>
 
 #ifdef PY_BINDINGS
-    namespace np = boost::python::numpy;
-    namespace py = boost::python;
+namespace np = boost::python::numpy;
+namespace py = boost::python;
 #endif
 
 // DocString: cls_feat_node
@@ -72,27 +72,27 @@ public:
      */
     FeatureNode(unsigned long int feat_ind, std::string expr, std::vector<double> value, std::vector<double> test_value, Unit unit, bool set_val = true);
     #ifdef PY_BINDINGS
-        /**
-         * @brief Constructs a feature node using numpy arrays (cpp definition in <python/feature_creation/FeatureNode.cpp)
-         *
-         * @param feat_ind index of the feature
-         * @param expr Expression for the feature
-         * @param value Value of the feature for each sample
-         * @param value Value of the feature for each test sample
-         * @param unit Unit of the feature
-         */
-        FeatureNode(unsigned long int feat_ind, std::string expr, np::ndarray value, np::ndarray test_value, Unit unit);
-
-        /**
-         * @brief Constructs a feature node using Python lists (cpp definition in <python/feature_creation/FeatureNode.cpp)
-         *
-         * @param feat_ind index of the feature
-         * @param expr Expression for the feature
-         * @param value Value of the feature for each sample
-         * @param value Value of the feature for each test sample
-         * @param unit Unit of the feature
-         */
-        FeatureNode(unsigned long int feat_ind, std::string expr, py::list value, py::list test_value, Unit unit);
+    /**
+     * @brief Constructs a feature node using numpy arrays (cpp definition in python/feature_creation/FeatureNode.cpp)
+     *
+     * @param feat_ind index of the feature
+     * @param expr Expression for the feature
+     * @param value Value of the feature for each sample
+     * @param test_value Value of the feature for each test sample
+     * @param unit Unit of the feature
+     */
+    FeatureNode(unsigned long int feat_ind, std::string expr, np::ndarray value, np::ndarray test_value, Unit unit);
+
+    /**
+     * @brief Constructs a feature node using Python lists (cpp definition in python/feature_creation/FeatureNode.cpp)
+     *
+     * @param feat_ind index of the feature
+     * @param expr Expression for the feature
+     * @param value Value of the feature for each sample
+     * @param test_value Value of the feature for each test sample
+     * @param unit Unit of the feature
+     */
+    FeatureNode(unsigned long int feat_ind, std::string expr, py::list value, py::list test_value, Unit unit);
     #endif
 
     /**
@@ -171,7 +171,10 @@ public:
      *
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      */
-    inline void set_value(int offset=-1, bool for_comp=false){std::copy_n(_value.data(), _n_samp, value_ptr());}
+    inline void set_value(int offset=-1, bool for_comp=false)
+    {
+        std::copy_n(_value.data(), _n_samp, value_ptr());
+    }
 
     // DocString: feat_node_set_test_value
     /**
@@ -179,13 +182,19 @@ public:
      *
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      */
-    inline void set_test_value(int offset=-1, bool for_comp=false){if(!_selected) std::copy_n(_test_value.data(), _n_test_samp, test_value_ptr());}
+    inline void set_test_value(int offset=-1, bool for_comp=false)
+    {
+        if(!_selected) std::copy_n(_test_value.data(), _n_test_samp, test_value_ptr());
+    }
 
     // DocString: feat_node_is_nan
     /**
      * @brief Check if the feature contains NaN
      */
-    inline bool is_nan(){return std::any_of(value_ptr(), value_ptr() + _n_samp, [](double d){return !std::isfinite(d);});}
+    inline bool is_nan()
+    {
+        return std::any_of(value_ptr(), value_ptr() + _n_samp, [](double d){return !std::isfinite(d);});
+    }
 
     // DocString: feat_node_is_const
     /**
@@ -203,14 +212,20 @@ public:
      *
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      */
-    inline double* value_ptr(int offset=-1, bool for_comp=false){return _selected ? node_value_arrs::get_d_matrix_ptr(_d_mat_ind) : node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, 0, offset, for_comp);}
+    inline double* value_ptr(int offset=-1, bool for_comp=false)
+    {
+        return _selected ? node_value_arrs::get_d_matrix_ptr(_d_mat_ind) : node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, 0, offset, for_comp);
+    }
 
     /**
      * @brief The pointer to where the feature's test data is stored
      *
      * @param offset(int) Key to determine which part of the temporary storage array to look into
      */
-    inline double* test_value_ptr(int offset=-1, bool for_comp=false){return node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, 0, offset, for_comp);}
+    inline double* test_value_ptr(int offset=-1, bool for_comp=false)
+    {
+        return node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, 0, offset, for_comp);
+    }
 
     // DocString: feat_node_rung
     /**
@@ -241,7 +256,10 @@ public:
      * @param cur_expr The current expression
      * @return The current postfix expression of the feature
      */
-    inline void update_postfix(std::string& cur_expr, bool add_params=true){cur_expr = get_postfix_term() + "|" + cur_expr;};
+    inline void update_postfix(std::string& cur_expr, bool add_params=true)
+    {
+        cur_expr = get_postfix_term() + "|" + cur_expr;
+    }
 
     /**
      * @brief Get the three character representation of the operator
@@ -266,7 +284,9 @@ public:
     inline node_ptr feat(int ind)
     {
         if(ind > 0)
+        {
             throw std::logic_error("Index not found in _feats");
+        }
         return nullptr;
     }
 
@@ -288,107 +308,109 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        inline std::vector<double> parameters(){return std::vector<double>();};
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        inline void set_parameters(std::vector<double> params, bool check_sz=true){};
-
-        /**
-         * @brief returns the number of parameters for this feature
-         * @return the number of parameters (_params.size())
-         */
-        inline int n_params(int n_cur=0, int depth = 1){return n_cur;};
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        inline void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0){set_value(offset);};
-
-        /**
-         * @brief The pointer to where the feature's training data is stored
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         * @returns the pointer to the feature's data
-         */
-        inline double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0){return value_ptr(offset);};
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        inline void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0){set_test_value(offset);};
-
-        /**
-         * @brief The pointer to where the feature's test data is stored
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         * @returns the pointer to the feature's data
-         */
-        inline double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0){return test_value_ptr(offset);};
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth = 1){return _expr;};
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1){return str_utils::latexify(_expr);}
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1){};
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param samp_ind sample index number
-         */
-        void param_derivative(const double* params, double* dfdp){}
-
-        /**
-         * @brief Get the parameter gradient for non-linear optimization
-         *
-         * @param grad pointer to the gradient storage
-         * @param samp_ind sample index to calculate the gradient for
-         */
-        virtual void gradient(double* grad, double* dfdp){throw std::logic_error("Asking for the gradient of non-parameterized feature");}
-
-        /**
-         * @brief Get the parameter gradient for non-linear optimization
-         *
-         * @param grad pointer to the gradient storage
-         * @param samp_ind sample index to calculate the gradient for
-         * @param params pointer to the parameters vector
-         */
-        inline void gradient(double* grad, double* dfdp, const double* params){};
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    inline std::vector<double> parameters(){return std::vector<double>();}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    inline void set_parameters(std::vector<double> params, bool check_sz=true){}
+
+    /**
+     * @brief Returns the number of parameters for this feature
+     * @return the number of parameters (_params.size())
+     */
+    inline int n_params(int n_cur=0, int depth=1){return n_cur;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    inline void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0){set_value(offset);}
+
+    /**
+     * @brief The pointer to where the feature's training data is stored
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     * @returns the pointer to the feature's data
+     */
+    inline double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0){return value_ptr(offset);}
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    inline void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0){set_test_value(offset);}
+
+    /**
+     * @brief The pointer to where the feature's test data is stored
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     * @returns the pointer to the feature's data
+     */
+    inline double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0){return test_value_ptr(offset);}
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1){return _expr;}
+
+    /**
+     * @brief Get the latexified expression for the overall feature (from the root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1){return str_utils::latexify(_expr);}
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    inline void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1){}
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param samp_ind sample index number
+     */
+    void param_derivative(const double* params, double* dfdp){}
+
+    /**
+     * @brief Get the parameter gradient for non-linear optimization
+     *
+     * @param grad pointer to the gradient storage
+     * @param samp_ind sample index to calculate the gradient for
+     */
+    virtual void gradient(double* grad, double* dfdp)
+    {
+        throw std::logic_error("Asking for the gradient of non-parameterized feature");
+    }
+
+    /**
+     * @brief Get the parameter gradient for non-linear optimization
+     *
+     * @param grad pointer to the gradient storage
+     * @param dfdp pointer to the derivative of the feature with respect to the parameters
+     * @param params pointer to the parameters vector
+     */
+    inline void gradient(double* grad, double* dfdp, const double* params){}
     #endif
 };
 
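
The PARAMETERIZE block above makes FeatureNode a graceful leaf in the parameterized expression tree: every parameter hook is a no-op or a pass-through to the unparameterized overload, and only the params-free `gradient` throws, since requesting a gradient from a parameter-free node indicates a caller bug. A simplified sketch of that pattern (hypothetical names, not the project's real hierarchy):

    #include <stdexcept>
    #include <vector>

    struct LeafSketch
    {
        // No fitted parameters: the hooks degrade to no-ops / pass-throughs
        std::vector<double> parameters() const { return {}; }
        int n_params(int n_cur = 0) const { return n_cur; }

        // Requesting a gradient from a parameter-free node is a logic error
        virtual void gradient(double* /*grad*/, double* /*dfdp*/)
        {
            throw std::logic_error("Asking for the gradient of a non-parameterized feature");
        }
        virtual ~LeafSketch() = default;
    };
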
diff --git a/src/feature_creation/node/ModelNode.cpp b/src/feature_creation/node/ModelNode.cpp
index 7d2d51f3e03e75a91c7bc7da3bee191d1c93cfde..c15869bcf6c02f3360bd44e25bb26d4be545cbaf 100644
--- a/src/feature_creation/node/ModelNode.cpp
+++ b/src/feature_creation/node/ModelNode.cpp
@@ -31,8 +31,19 @@ ModelNode::ModelNode(
     _w_remap_svm = w_remap_svm_temp;
     _b_remap_svm = b_remap_svm_temp;
 
-    std::transform(_value.begin(), _value.end(), _value_svm.begin(), [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;});
-    std::transform(_test_value.begin(), _test_value.end(), _test_value_svm.begin(), [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;});
+    std::transform(
+        _value.begin(),
+        _value.end(),
+        _value_svm.begin(),
+        [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;}
+    );
+
+    std::transform(
+        _test_value.begin(),
+        _test_value.end(),
+        _test_value_svm.begin(),
+        [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;}
+    );
 
     generate_fxn_list();
 }
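
Both ModelNode constructors precompute `_value_svm` and `_test_value_svm` with the same affine map, `(val - b) * w`. A sketch of that remap in isolation; the choice `w = 1/(max - min)`, `b = min` (mapping training values onto [0, 1]) is only an assumption here, since `w_remap_svm_temp` and `b_remap_svm_temp` are computed outside this hunk:

    #include <algorithm>
    #include <vector>

    // Hypothetical helper mirroring the std::transform calls above
    std::vector<double> remap_for_svm(const std::vector<double>& value, double w, double b)
    {
        std::vector<double> value_svm(value.size());
        std::transform(
            value.begin(),
            value.end(),
            value_svm.begin(),
            [w, b](double val){ return (val - b) * w; }  // same affine map as the patch
        );
        return value_svm;
    }
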
@@ -55,8 +66,19 @@ ModelNode::ModelNode(node_ptr in_node) :
     _w_remap_svm = w_remap_svm_temp;
     _b_remap_svm = b_remap_svm_temp;
 
-    std::transform(_value.begin(), _value.end(), _value_svm.begin(), [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;});
-    std::transform(_test_value.begin(), _test_value.end(), _test_value_svm.begin(), [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;});
+    std::transform(
+        _value.begin(),
+        _value.end(),
+        _value_svm.begin(),
+        [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;}
+    );
+
+    std::transform(
+        _test_value.begin(),
+        _test_value.end(),
+        _test_value_svm.begin(),
+        [w_remap_svm_temp, b_remap_svm_temp](double val){return (val - b_remap_svm_temp) * w_remap_svm_temp;}
+    );
 
     generate_fxn_list();
 }
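
The reflowed `generate_fxn_list` below emits one `(callable, arity)` pair per postfix token: each binary op writes its result into the second-from-top stack buffer and pops the top, while each unary op rewrites the top buffer in place (the patch collects the pairs in reverse, as the name `fxn_list_rev` suggests). A minimal, hypothetical evaluator in that shape, assuming the pairs have already been put into application order:

    #include <functional>
    #include <utility>
    #include <vector>

    using StackFxn = std::function<void(int, std::vector<double*>&)>;

    // Apply each op to a stack of per-sample data buffers; the arity
    // (pair::second) records how many buffers the callable consumes.
    void evaluate(int n_samp, std::vector<double*>& stack,
                  const std::vector<std::pair<StackFxn, int>>& fxn_list)
    {
        for(const auto& fxn : fxn_list)
        {
            fxn.first(n_samp, stack);
        }
    }

    // A binary op in the same shape as the add/sub/mult lambdas below:
    // element-wise sum of the top two buffers, stored in the lower one.
    const std::pair<StackFxn, int> add_fxn{
        [](int n_samp, std::vector<double*>& stack){
            double* lhs = stack[stack.size() - 2];
            double* rhs = stack[stack.size() - 1];
            for(int ii = 0; ii < n_samp; ++ii)
            {
                lhs[ii] += rhs[ii];
            }
            stack.pop_back();
        },
        2
    };
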
@@ -87,358 +109,730 @@ void ModelNode::generate_fxn_list()
             if(op_terms[0] == "add")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::add(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::add(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                            stack.pop_back();
+                        },
+                        2
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::add(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]); stack.pop_back();}, 2));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::add(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::add(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::add(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "sub")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sub(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::sub(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                            stack.pop_back();
+                        },
+                        2
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sub(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]); stack.pop_back();}, 2));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sub(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sub(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sub(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "abd")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::abs_diff(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::abs_diff(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                            stack.pop_back();
+                        },
+                        2
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::abs_diff(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]); stack.pop_back();}, 2));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::abs_diff(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::abs_diff(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::abs_diff(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "mult")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::mult(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::mult(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                            stack.pop_back();
+                        },
+                        2
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::mult(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]); stack.pop_back();}, 2));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::mult(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::mult(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::mult(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "div")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::div(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::div(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                            stack.pop_back();
+                        },
+                        2
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::div(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]); stack.pop_back();}, 2));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::div(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]); stack.pop_back();}, 2));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::div(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::div(n_samp, stack[stack.size() - 2], stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 2]);
+                                stack.pop_back();
+                            },
+                            2
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "abs")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::abs(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::abs(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::abs(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::abs(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::abs(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::abs(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "inv")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::inv(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::inv(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::inv(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::inv(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::inv(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::inv(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "exp")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::exp(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::exp(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "nexp")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::neg_exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::neg_exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::neg_exp(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::neg_exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::neg_exp(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::neg_exp(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "log")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::log(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::log(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::log(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::log(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::log(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::log(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "sin")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sin(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::sin(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sin(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sin(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sin(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sin(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "cos")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cos(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::cos(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cos(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cos(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::cos(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::cos(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "sq")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sq(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::sq(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sq(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sq(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sq(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sq(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "sqrt")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sqrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::sqrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sqrt(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sqrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sqrt(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sqrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "cb")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cb(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::cb(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cb(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cb(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::cb(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::cb(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "cbrt")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cbrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::cbrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cbrt(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::cbrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::cbrt(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::cbrt(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else if(op_terms[0] == "sp")
             {
                 #ifndef PARAMETERIZE
-                    fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sixth_pow(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
+                fxn_list_rev.push_back(
+                    std::make_pair(
+                        [=](int n_samp, std::vector<double*>& stack){
+                            allowed_op_funcs::sixth_pow(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                        },
+                        1
+                    )
+                );
                 #else
-                    if((op_terms.size() > 1) && (params.size() == 0))
-                    {
-                        params.resize(op_terms.size() - 1);
-                        std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
-                    }
-                    if(params.size() > 0)
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sixth_pow(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);}, 1));
-                        pp += 2;
-                    }
-                    else
-                    {
-                        fxn_list_rev.push_back(std::make_pair([=](int n_samp, std::vector<double*>& stack){allowed_op_funcs::sixth_pow(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);}, 1));
-                    }
+                if((op_terms.size() > 1) && (params.size() == 0))
+                {
+                    params.resize(op_terms.size() - 1);
+                    std::transform(op_terms.begin() + 1, op_terms.end(), params.begin(), [](std::string s){return std::stod(s);});
+                }
+                if(params.size() > 0)
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sixth_pow(n_samp, stack[stack.size() - 1], params[pp], params[pp + 1], stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                    pp += 2;
+                }
+                else
+                {
+                    fxn_list_rev.push_back(
+                        std::make_pair(
+                            [=](int n_samp, std::vector<double*>& stack){
+                                allowed_op_funcs::sixth_pow(n_samp, stack[stack.size() - 1], 1.0, 0.0, stack[stack.size() - 1]);
+                            },
+                            1
+                        )
+                    );
+                }
                 #endif
             }
             else
@@ -470,7 +863,9 @@ double ModelNode::eval(double* x_in)
         catch(const std::invalid_argument e)
         {
             if(stack.size() < _fxn_list[ff].second)
+            {
                 throw std::logic_error("Wrong number of features stored in the stack.");
+            }
             _fxn_list[ff].first(1, stack);
             ++ff;
         }
@@ -484,7 +879,9 @@ double ModelNode::eval(double* x_in)
 double ModelNode::eval(std::vector<double> x_in)
 {
     if(x_in.size() > _n_leaves)
+    {
         throw std::logic_error("Incorrect number of variables passed to eval operator.");
+    }
     return eval(x_in.data());
 }
 
@@ -494,9 +891,13 @@ double ModelNode::eval(std::map<std::string, double> x_in_dct)
     for(auto& in_expr : get_x_in_expr_list())
     {
         if(x_in_dct.count(in_expr) == 0)
+        {
             throw std::logic_error("The value of " + in_expr + " is not in x_in_dct.");
+        }
         else if(x_in_dct.count(in_expr) > 1)
+        {
             throw std::logic_error("Multiple values of " + in_expr + " defined in x_in_dct.");
+        }
 
         x_in.push_back(x_in_dct[in_expr]);
     }
@@ -521,13 +922,17 @@ std::vector<double> ModelNode::eval(std::vector<double>* x_in)
         catch(const std::invalid_argument e)
         {
             if(stack.size() < _fxn_list[ff].second)
+            {
                 throw std::logic_error("Wrong number of features stored in the stack.");
+            }
             _fxn_list[ff].first((x_in)->size(), stack);
             ++ff;
         }
     }
     if(stack.size() != 1)
+    {
         throw std::logic_error("The final stack size is not one, something wrong happened during the calculation.");
+    }
 
     std::vector<double> to_ret(x_in->size());
     std::copy_n(stack[0], to_ret.size(), to_ret.data());
@@ -537,13 +942,17 @@ std::vector<double> ModelNode::eval(std::vector<double>* x_in)
 std::vector<double> ModelNode::eval(std::vector<std::vector<double>> x_in)
 {
     if(x_in.size() > n_leaves())
+    {
         throw std::logic_error("Incorrect number of variables passed to eval operator.");
+    }
 
     int x_in_sz = x_in[0].size();
 
     bool same_szs = std::accumulate(x_in.begin(), x_in.end(), true, [&x_in_sz](bool cond, std::vector<double> x){return cond && (x.size() == x_in_sz);});
     if(!same_szs)
+    {
         throw std::logic_error("Not all vectors in x_in are the same size.");
+    }
 
     return eval(x_in.data());
 }
@@ -554,9 +963,13 @@ std::vector<double> ModelNode::eval(std::map<std::string, std::vector<double>> x
     for(auto& in_expr : get_x_in_expr_list())
     {
         if(x_in_dct.count(in_expr) == 0)
+        {
             throw std::logic_error("The value of " + in_expr + " is not in x_in_dct.");
+        }
         else if(x_in_dct.count(in_expr) > 1)
+        {
             throw std::logic_error("Multiple values of " + in_expr + " defined in x_in_dct.");
+        }
 
         x_in.push_back(x_in_dct[in_expr]);
     }
@@ -570,9 +983,13 @@ ModelNode::~ModelNode()
 void ModelNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     if(add_sub_leaves.count(_expr) > 0)
+    {
         add_sub_leaves[_expr] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[_expr] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -580,9 +997,13 @@ void ModelNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves
 void ModelNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot)
 {
     if(div_mult_leaves.count(_expr) > 0)
+    {
         div_mult_leaves[_expr] += fact;
+    }
     else
+    {
         div_mult_leaves[_expr] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -596,9 +1017,13 @@ std::map<int, int> ModelNode::primary_feature_decomp()
         try
         {
             if(pf_decomp.count(std::stoi(part)))
+            {
                 ++pf_decomp[std::stoi(part)];
+            }
             else
+            {
                 pf_decomp[std::stoi(part)] = 1;
+            }
         }
         catch(const std::invalid_argument e)
         {
@@ -618,9 +1043,13 @@ void ModelNode::update_primary_feature_decomp(std::map<int, int>& pf_decomp)
         try
         {
             if(pf_decomp.count(std::stoi(part)))
+            {
                 ++pf_decomp[std::stoi(part)];
+            }
             else
+            {
                 pf_decomp[std::stoi(part)] = 1;
+            }
         }
         catch(const std::invalid_argument e)
         {
diff --git a/src/feature_creation/node/ModelNode.hpp b/src/feature_creation/node/ModelNode.hpp
index 8f05c89153ed6a8e6964095f00f8b5b4b2187921..ea54ec5dd1f60f770b0923693a88500f3414ebd8 100644
--- a/src/feature_creation/node/ModelNode.hpp
+++ b/src/feature_creation/node/ModelNode.hpp
@@ -253,14 +253,14 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PY_BINDINGS
-        inline double eval_py(np::ndarray x_in){return eval(python_conv_utils::from_ndarray<double>(x_in));}
-        inline double eval_py(py::list x_in){return eval(python_conv_utils::from_list<double>(x_in));}
-        inline double eval_py(py::dict x_in){return eval(python_conv_utils::from_dict<std::string, double>(x_in));}
+    inline double eval_py(np::ndarray x_in){return eval(python_conv_utils::from_ndarray<double>(x_in));}
+    inline double eval_py(py::list x_in){return eval(python_conv_utils::from_list<double>(x_in));}
+    inline double eval_py(py::dict x_in){return eval(python_conv_utils::from_dict<std::string, double>(x_in));}
 
-        np::ndarray eval_many_py(np::ndarray x_in);
-        np::ndarray eval_many_py(py::dict x_in);
+    np::ndarray eval_many_py(np::ndarray x_in);
+    np::ndarray eval_many_py(py::dict x_in);
 
-        inline py::list x_in_expr_list_py(){return python_conv_utils::to_list<std::string>(_x_in_expr_list);}
+    inline py::list x_in_expr_list_py(){return python_conv_utils::to_list<std::string>(_x_in_expr_list);}
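+
+    // Editor's note (sketch): eval_py mirrors the C++ eval overloads; the dict
+    // form is assumed to key values by the expressions in x_in_expr_list_py().
+    // A hypothetical C++ counterpart:
+    //
+    //     std::map<std::string, double> x_in = {{"A", 1.0}, {"B", 2.0}};
+    //     double y = model.eval(x_in);  // "A", "B" are illustrative leaf expressions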
     #endif
 };
 
diff --git a/src/feature_creation/node/Node.hpp b/src/feature_creation/node/Node.hpp
index a0f30fa2ce9f9d7eeb6e6ac2526b27ce6862453b..7b25e4056268c7803cafcdbf0e4e66bbd2e3a188 100644
--- a/src/feature_creation/node/Node.hpp
+++ b/src/feature_creation/node/Node.hpp
@@ -13,9 +13,6 @@
 
 #include <feature_creation/node/value_storage/nodes_value_containers.hpp>
 #include <feature_creation/units/Unit.hpp>
-#ifdef PY_BINDINGS
-    #include <python/conversion_utils.hpp>
-#endif
 #include <utils/math_funcs.hpp>
 #include <utils/enum.hpp>
 
@@ -28,8 +25,9 @@
 #include <boost/serialization/unique_ptr.hpp>
 
 #ifdef PY_BINDINGS
-    namespace py = boost::python;
-    namespace np = boost::python::numpy;
+#include <python/conversion_utils.hpp>
+namespace py = boost::python;
+namespace np = boost::python::numpy;
 #endif
 
 // DocString: cls_node
@@ -340,107 +338,107 @@ public:
 
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters() = 0;
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true) = 0;
-
-        /**
-         * @brief returns the number of parameters for this feature
-         * @return the number of parameters (_params.size())
-         */
-        virtual int n_params(int n_cur = 0, int depth = 1) = 0;
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        virtual void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
-
-        /**
-         * @brief The pointer to where the feature's training data is stored
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         * @returns the pointer to the feature's data
-         */
-        virtual double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        virtual void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
-
-        /**
-         * @brief The pointer to where the feature's test data is stored
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         * @returns the pointer to the feature's data
-         */
-        virtual double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        virtual std::string expr(double* params, int depth = 1) = 0;
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        virtual std::string get_latex_expr(double* params, int depth=1) = 0;
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1) = 0;
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param samp_ind sample index number
-         */
-        virtual void param_derivative(const double* params, double* dfdp) = 0;
-
-        /**
-         * @brief Get the parameter gradient for non-linear optimization
-         *
-         * @param grad pointer to the gradient storage
-         * @param samp_ind sample index to calculate the gradient for
-         */
-        virtual void gradient(double* grad, double* dfdp) = 0;
-
-        /**
-         * @brief Get the parameter gradient for non-linear optimization
-         *
-         * @param grad pointer to the gradient storage
-         * @param samp_ind sample index to calculate the gradient for
-         * @param params pointer to the parameters vector
-         */
-        virtual void gradient(double* grad, double* dfdp, const double* params) = 0;
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters() = 0;
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true) = 0;
+
+    /**
+     * @brief Returns the number of parameters for this feature
+     * @return the number of parameters (_params.size())
+     */
+    virtual int n_params(int n_cur = 0, int depth = 1) = 0;
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    virtual void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+
+    /**
+     * @brief The pointer to where the feature's training data is stored
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     * @returns the pointer to the feature's data
+     */
+    virtual double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    virtual void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+
+    /**
+     * @brief The pointer to where the feature's test data is stored
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     * @returns the pointer to the feature's data
+     */
+    virtual double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    virtual std::string expr(double* params, int depth = 1) = 0;
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    virtual std::string get_latex_expr(double* params, int depth=1) = 0;
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1) = 0;
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param samp_ind sample index number
+     */
+    virtual void param_derivative(const double* params, double* dfdp) = 0;
+
+    /**
+     * @brief Get the parameter gradient for non-linear optimization
+     *
+     * @param grad pointer to the gradient storage
+     * @param samp_ind sample index to calculate the gradient for
+     */
+    virtual void gradient(double* grad, double* dfdp) = 0;
+
+    /**
+     * @brief Get the parameter gradient for non-linear optimization
+     *
+     * @param grad pointer to the gradient storage
+     * @param samp_ind sample index to calculate the gradient for
+     * @param params pointer to the parameters vector
+     */
+    virtual void gradient(double* grad, double* dfdp, const double* params) = 0;
     #endif
 
     //DocString: node_nfeats
@@ -461,35 +459,35 @@ public:
 
     #ifdef PY_BINDINGS
 
-        // DocString: node_value_py
-        /**
-         * @brief The training data of the feature
-         * @return The training data as a numpy array
-         */
-        inline np::ndarray value_py(){return python_conv_utils::to_ndarray<double>(value());}
-
-        // DocString: node_test_value_py
-        /**
-         * @brief The test data of the feature
-         * @return The test data as a numpy array
-         */
-        inline np::ndarray test_value_py(){return python_conv_utils::to_ndarray<double>(test_value());}
-
-        #ifdef PARAMETERIZE
-            // DocString: node_parameters_py
-            /**
-             * @brief The parameters used for non-linear operator nodes
-             * @return The operator node parameters as a list [alpha, a]
-             */
-            inline py::list parameters_py(){return python_conv_utils::to_list<double>(parameters());}
-        #endif
-
-        // DocString: node_primary_feature_decomp
-        /**
-         * @brief Get the primary feature decomposition of a feature
-         * @return A python dict representing the primary feature comprising a feature
-         */
-        inline py::dict primary_feature_decomp_py(){return python_conv_utils::to_dict<int, int>(primary_feature_decomp());}
+    // DocString: node_value_py
+    /**
+     * @brief The training data of the feature
+     * @return The training data as a numpy array
+     */
+    inline np::ndarray value_py(){return python_conv_utils::to_ndarray<double>(value());}
+
+    // DocString: node_test_value_py
+    /**
+     * @brief The test data of the feature
+     * @return The test data as a numpy array
+     */
+    inline np::ndarray test_value_py(){return python_conv_utils::to_ndarray<double>(test_value());}
+
+    #ifdef PARAMETERIZE
+    // DocString: node_parameters_py
+    /**
+     * @brief The parameters used for non-linear operator nodes
+     * @return The operator node parameters as a list [alpha, a]
+     */
+    inline py::list parameters_py(){return python_conv_utils::to_list<double>(parameters());}
+    #endif
+
+    // DocString: node_primary_feature_decomp
+    /**
+     * @brief Get the primary feature decomposition of a feature
+     * @return A python dict representing the primary feature comprising a feature
+     */
+    inline py::dict primary_feature_decomp_py(){return python_conv_utils::to_dict<int, int>(primary_feature_decomp());}
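+
+    // Editor's note (sketch): the returned dict maps a primary feature's index to
+    // its multiplicity in the expression tree; e.g. a feature (A * A) / B would
+    // decompose to {ind(A): 2, ind(B): 1} (names and indices illustrative).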
 
     #endif
 };
diff --git a/src/feature_creation/node/operator_nodes/OperatorNode.hpp b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
index 6c481a43c265ff2f2e9147ce430a3836097b152a..58ef1008c31f6c111902ecb4056782bb005c1ed8 100644
--- a/src/feature_creation/node/operator_nodes/OperatorNode.hpp
+++ b/src/feature_creation/node/operator_nodes/OperatorNode.hpp
@@ -27,10 +27,10 @@
 #endif
 
 #ifdef PY_BINDINGS
-    #include <python/conversion_utils.hpp>
+#include <python/conversion_utils.hpp>
 
-    namespace py = boost::python;
-    namespace np = boost::python::numpy;
+namespace py = boost::python;
+namespace np = boost::python::numpy;
 #endif
 
 // DocString: cls_op_node
@@ -193,7 +193,9 @@ public:
     virtual double* value_ptr(int offset=-1, bool for_comp=false)
     {
         if(_selected && (offset == -1))
+        {
             return node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+        }
         offset += (offset == -1);
         if((rung() > node_value_arrs::N_RUNGS_STORED) && (node_value_arrs::temp_storage_reg(_arr_ind, rung(), offset, for_comp) != _feat_ind))
         {
@@ -283,7 +285,9 @@ public:
     void update_primary_feature_decomp(std::map<int, int>& pf_decomp)
     {
         for(auto& feat : _feats)
+        {
             feat->update_primary_feature_decomp(pf_decomp);
+        }
     }
 
     /**
@@ -301,7 +305,9 @@ public:
         postfix << get_postfix_term();
         cur_expr = postfix.str() + "|" + cur_expr;
         for(int nn = N - 1; nn >= 0; --nn)
+        {
             _feats[nn]->update_postfix(cur_expr);
+        }
     }
 
     /**
@@ -327,7 +333,9 @@ public:
     inline node_ptr feat(int ind)
     {
         if(ind > N)
+        {
             throw std::logic_error("Index not found in _feats");
+        }
         return _feats[ind];
     }
 
@@ -349,176 +357,187 @@ public:
     virtual void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot) = 0;
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters() = 0;
-
-        //DocString: op_node_get_params
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer) = 0;
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true) = 0;
-
-        // DocString: op_node_n_params
-        /**
-         * @brief returns the number of theoretical parameters for this feature
-         * @return the number of theoretical parameters
-         */
-        virtual inline int n_params(int n_cur = 0, int depth = 1)
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters() = 0;
+
+    //DocString: op_node_get_params
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param prop property to fit to get the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer) = 0;
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true) = 0;
+
+    // DocString: op_node_n_params
+    /**
+     * @brief Returns the number of theoretical parameters for this feature
+     * @return the number of theoretical parameters
+     */
+    virtual inline int n_params(int n_cur = 0, int depth = 1)
+    {
+        if(depth > nlopt_wrapper::MAX_PARAM_DEPTH)
         {
-            return (depth > nlopt_wrapper::MAX_PARAM_DEPTH) ? 0 : std::accumulate(_feats.begin(), _feats.end(), 2, [&](double tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
+            return 0;
         }
 
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        virtual void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
-
-        /**
-         * @brief The pointer to where the feature's training data is stored
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         * @returns the pointer to the feature's data
-         */
-        double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0)
-        {
-            if(_selected && (offset == -1))
-                return node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+        return std::accumulate(_feats.begin(), _feats.end(), 2, [&](int tot, node_ptr feat){return tot + feat->n_params(0, depth + 1);});
+    }
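+
+    // Editor's note (sketch): each parameterized operator contributes two
+    // parameters (a scale and a shift), so, assuming primary features contribute
+    // none, a chain such as cos(sq(A)) with MAX_PARAM_DEPTH >= 2 gives
+    // n_params() == 2 + 2 == 4.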
 
-            offset += (offset == -1);
-            set_value(params, offset, for_comp, depth);
-            return node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
-        }
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    virtual void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
 
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        virtual void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
-
-        /**
-         * @brief The pointer to where the feature's test data is stored
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         * @returns the pointer to the feature's data
-         */
-        double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0)
+    /**
+     * @brief The pointer to where the feature's training data is stored
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     * @returns the pointer to the feature's data
+     */
+    double* value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0)
+    {
+        if(_selected && (offset == -1))
         {
-            offset += (offset == -1);
-            set_test_value(params, offset, for_comp, depth);
-
-            return node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+            return node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
         }
 
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        virtual std::string expr(double* params, int depth = 1) = 0;
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         */
-        virtual std::string get_latex_expr(double* params, int depth=1) = 0;
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1) = 0;
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        virtual void param_derivative(const double* params, double* dfdp) = 0;
-
-        /**
-         * @brief Get the parameter gradient for non-linear optimization
-         *
-         * @param grad pointer to the gradient storage
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        void gradient(double* grad, double* dfdp)
-        {
-            if(n_params() == 0)
-                throw std::logic_error("Asking for the gradient of non-parameterized feature");
+        offset += (offset == -1);
+        set_value(params, offset, for_comp, depth);
+        return node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
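+
+    // Editor's note: `offset += (offset == -1)` maps the sentinel offset of -1 to
+    // 0 while leaving non-negative offsets unchanged, so callers can request the
+    // default temporary-storage slot without a branch.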
 
-            std::fill_n(grad, n_params(), 1.0);
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    virtual void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=0) = 0;
 
-            gradient(grad, dfdp, parameters().data());
-        }
+    /**
+     * @brief The pointer to where the feature's test data is stored
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     * @returns the pointer to the feature's data
+     */
+    double* test_value_ptr(const double* params, int offset=-1, bool for_comp=false, int depth=0)
+    {
+        offset += (offset == -1);
+        set_test_value(params, offset, for_comp, depth);
+
+        return node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    virtual std::string expr(double* params, int depth = 1) = 0;
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     */
+    virtual std::string get_latex_expr(double* params, int depth=1) = 0;
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1) = 0;
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    virtual void param_derivative(const double* params, double* dfdp) = 0;
 
-        /**
-         * @brief Get the parameter gradient for non-linear optimization
-         *
-         * @param grad pointer to the gradient storage
-         * @param dfdp pointer to where the feature derivative pointers are located
-         * @param params pointer to the parameters vector
-         */
-        void gradient(double* grad, double* dfdp, const double* params)
+    /**
+     * @brief Get the parameter gradient for non-linear optimization
+     *
+     * @param grad pointer to the gradient storage
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    void gradient(double* grad, double* dfdp)
+    {
+        if(n_params() == 0)
         {
-            int np = n_params();
-            param_derivative(params, dfdp);
-            double* val_ptr = _feats[N - 1]->value_ptr(params);
+            throw std::logic_error("Asking for the gradient of non-parameterized feature");
+        }
 
-            std::transform(dfdp, dfdp + _n_samp, grad, grad, std::multiplies<double>());
-            std::transform(val_ptr, val_ptr + _n_samp, grad, grad, std::multiplies<double>());
+        std::fill_n(grad, n_params(), 1.0);
 
-            std::transform(dfdp, dfdp + _n_samp, grad + _n_samp, grad + _n_samp, std::multiplies<double>());
+        gradient(grad, dfdp, parameters().data());
+    }
+
+    /**
+     * @brief Get the parameter gradient for non-linear optimization
+     *
+     * @param grad pointer to the gradient storage
+     * @param dfdp pointer to where the feature derivative pointers are located
+     * @param params pointer to the parameters vector
+     */
+    void gradient(double* grad, double* dfdp, const double* params)
+    {
+        int np = n_params();
+        param_derivative(params, dfdp);
+        double* val_ptr = _feats[N - 1]->value_ptr(params);
 
-            for(int pp = 2; pp  < np; ++pp)
-                std::transform(dfdp, dfdp + _n_samp, grad + pp * _n_samp, grad + pp * _n_samp, [params](double d, double g){return params[0] * d * g;});
+        std::transform(dfdp, dfdp + _n_samp, grad, grad, std::multiplies<double>());
+        std::transform(val_ptr, val_ptr + _n_samp, grad, grad, std::multiplies<double>());
 
-            int start = 2;
-            for(int ff = N - 1; ff >=0; --ff)
-            {
-                _feats[ff]->gradient(grad + start * _n_samp, dfdp, params + start);
-                start += _feats[ff]->n_params();
-            }
+        std::transform(dfdp, dfdp + _n_samp, grad + _n_samp, grad + _n_samp, std::multiplies<double>());
+
+        for(int pp = 2; pp < np; ++pp)
+        {
+            std::transform(dfdp, dfdp + _n_samp, grad + pp * _n_samp, grad + pp * _n_samp, [params](double d, double g){return params[0] * d * g;});
         }
 
-        #ifdef PY_BINDINGS
+        int start = 2;
+        for(int ff = N - 1; ff >= 0; --ff)
+        {
+            _feats[ff]->gradient(grad + start * _n_samp, dfdp, params + start);
+            start += _feats[ff]->n_params();
+        }
+    }
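+
+    // Editor's note (sketch): grad is assumed to be laid out as n_params() blocks
+    // of _n_samp entries: block 0 accumulates d f / d scale (dfdp times the child
+    // value), block 1 accumulates d f / d shift (dfdp), and blocks >= 2 are
+    // handed to the children with the extra chain-rule factor params[0].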
 
-            // DocString: op_node_set_param_list
-            /**
-             * @brief Set the non-linear parameters
-             */
-            inline void set_parameters(py::list params){set_parameters(python_conv_utils::from_list<double>(params));}
+    #ifdef PY_BINDINGS
 
-            // DocString: op_node_set_param_arr
-            /**
-             * @brief Set the non-linear parameters
-             */
-            inline void set_parameters(np::ndarray params){set_parameters( python_conv_utils::from_ndarray<double>(params));}
-        #endif
+    // DocString: op_node_set_param_list
+    /**
+     * @brief Set the non-linear parameters
+     */
+    inline void set_parameters(py::list params){set_parameters(python_conv_utils::from_list<double>(params));}
+
+    // DocString: op_node_set_param_arr
+    /**
+     * @brief Set the non-linear parameters
+     */
+    inline void set_parameters(np::ndarray params){set_parameters(python_conv_utils::from_ndarray<double>(params));}
+    #endif
     #endif
 };
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp
index 4dc3bf2ee7443f48688a369e37e0c6762a38fa8e..fb83fc9f8a98b32a9d8e22417985d7b585fc6585 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.cpp
@@ -5,18 +5,28 @@ void generateAbsNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 {
     ++feat_ind;
     if((feat->type() == NODE_TYPE::ABS) || (feat->type() == NODE_TYPE::ABS_DIFF))
+    {
         return;
+    }
 
     int offset = -1;
     double* val_ptr = feat->value_ptr(2 * offset);
     if(*std::min_element(val_ptr, val_ptr + feat->n_samp()) > 0.0)
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<AbsNode>(feat, feat_ind);
     val_ptr = new_feat->value_ptr();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
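+
+// Editor's note: the rejection test in generateAbsNode screens out, in order,
+// constant features, values that are non-finite or exceed u_bound in magnitude,
+// and features whose largest magnitude falls below l_bound.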
@@ -32,25 +42,40 @@ AbsNode::AbsNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     OperatorNode({feat}, feat_ind)
 {
     if((feat->type() == NODE_TYPE::ABS) || (feat->type() == NODE_TYPE::ABS_DIFF))
+    {
         throw InvalidFeatureException();
+    }
 
     double* val_ptr = feat->value_ptr(0);
     if(*std::min_element(val_ptr, val_ptr + _n_samp) > 0.0)
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void AbsNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -59,9 +84,13 @@ void AbsNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -86,5 +115,11 @@ void AbsNode::set_value(int offset, bool for_comp)
 void AbsNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::abs(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::abs(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
index 77da64f168d7aeca8d3114558e80692256e6602c..858b45d50d423f0a4a2bf0943551997bdeb3fa01 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp
@@ -145,94 +145,94 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "|{:.10e}*{}{:+15.10e}|",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\left|{:.3e}{}{:+8.3e}\\right|\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return util_funcs::sign(params[0] * vp + params[1]);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the feature's parameters to the property
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset Key to determine which part of the temporary storage array to look into
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset Key to determine which part of the temporary storage array to look into
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "|{:.10e}*{}{:+15.10e}|",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (from the root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\left|{:.3e}{}{:+8.3e}\\right|\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the non-linear parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
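+        // d/d(beta) |alpha * v + beta| = sign(alpha * v + beta), evaluated per sample; the optimizer
+        // is assumed to reuse this, scaled by v, for the derivative with respect to alpha.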
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return util_funcs::sign(params[0] * vp + params[1]);});
+    }
     #endif
 };
 void generateAbsNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
index 157cff8239780494ded45fc07eac08993097e30b..a6b1ff4bd9244978e8b68e53968dab2412c7bb56 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.cpp
@@ -2,16 +2,27 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsParamNode)
 
-void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateAbsParamNode(
+    std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<AbsParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -26,8 +37,16 @@ AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 AbsParamNode::AbsParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -59,7 +78,9 @@ void AbsParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
         _sign_alpha = 1.0;
     }
     else if(min_res_neg == std::numeric_limits<double>::infinity())
+    {
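+        // An infinite residual means the fit failed; zeroing alpha lets the |alpha| <= 1e-10 screens discard the feature.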
         _params[0] = 0.0;
+    }
 }
 
 void AbsNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -67,21 +88,45 @@ void AbsNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
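+    // While below MAX_PARAM_DEPTH, recurse with the parameter block shifted past (alpha, beta);
+    // deeper children fall back to their stored, non-parameterized values.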
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::abs(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 void AbsNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
+    double* vp_0;
+
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
 
-    allowed_op_funcs::abs(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
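+    // Writes |params[0] * vp_0[i] + params[1]| into this feature's test-value slot.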
+    allowed_op_funcs::abs(
+        _n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -90,7 +135,9 @@ void AbsNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
     _feats[0]->set_bounds(lb + 2, ub + 2);
 }
 
@@ -100,7 +147,9 @@ void AbsParamNode::set_bounds(double* lb, double* ub, int from_parent, int depth
     ub[0] = _sign_alpha;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
index 139a4df6358d0141076f7fcbe800df9caee7f062..1640a0ee9ba6edc11370d431f56f4338617307f5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp
@@ -146,8 +146,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -155,7 +154,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -184,13 +185,17 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateAbsParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateAbsParamNode(
+    std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp
index 925137cbde26d46cf973efcdf386384fee56a4cb..38577714d801821fddd2c3bbf467d102b1ed1d95 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.cpp
@@ -5,30 +5,50 @@ void generateAbsDiffNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node
 {
     ++feat_ind;
     if(feat_1->unit() != feat_2->unit())
+    {
         return;
+    }
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
     feat_1->update_add_sub_leaves(add_sub_leaves, 1, expected_abs_tot);
     feat_2->update_add_sub_leaves(add_sub_leaves,-1, expected_abs_tot);
 
-    if((add_sub_leaves.size() < 2) || (std::abs(std::accumulate(add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);})) != 0))
+    int leaves_v_expected = std::accumulate(
+        add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);}
+    );
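+    // Each distinct leaf should contribute a net coefficient of +/-1; a nonzero residual against
+    // expected_abs_tot means a leaf repeats or cancels out.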
+    if((add_sub_leaves.size() < 2) || (std::abs(leaves_v_expected) != 0))
+    {
         return;
+    }
 
     int add_sub_tot_first = std::abs(add_sub_leaves.begin()->second);
-    if((std::abs(add_sub_tot_first) > 1) && std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;}))
+    if(
+        (std::abs(add_sub_tot_first) > 1) &&
+        std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;})
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SubNode>(feat_1, feat_2, feat_ind);
     double* val_ptr = new_feat->value_ptr();
     if(*std::min_element(val_ptr, val_ptr + new_feat->n_samp()) > l_bound)
+    {
         return;
+    }
 
     new_feat = std::make_shared<AbsDiffNode>(feat_1, feat_2, feat_ind);
     new_feat->set_value();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -44,7 +64,9 @@ AbsDiffNode::AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int fea
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     if(feat_1->unit() != feat_2->unit())
+    {
         throw InvalidFeatureException();
+    }
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
@@ -52,33 +74,58 @@ AbsDiffNode::AbsDiffNode(node_ptr feat_1, node_ptr feat_2, unsigned long int fea
     _feats[1]->update_add_sub_leaves(add_sub_leaves,-1, expected_abs_tot);
 
     if((add_sub_leaves.size() < 2))
+    {
         throw InvalidFeatureException();
+    }
 
-    if(std::abs(std::accumulate(add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);})) != 0)
+    int leaves_v_expected = std::accumulate(
+        add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);}
+    );
+    if(std::abs(leaves_v_expected) != 0)
+    {
         throw InvalidFeatureException();
+    }
 
     int add_sub_tot_first = std::abs(add_sub_leaves.begin()->second);
 
-    if((std::abs(add_sub_tot_first) > 1) && std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;}))
+    if(
+        (std::abs(add_sub_tot_first) > 1) &&
+        std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;})
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     double* val_ptr = node_value_arrs::get_value_ptr(feat_ind, feat_ind, rung());
     allowed_op_funcs::sub(feat_1->n_samp(), feat_1->value_ptr(rung() + 2), feat_2->value_ptr(rung() + 1), 1.0, 0.0, val_ptr);
     if(*std::min_element(val_ptr, val_ptr + _n_samp) > l_bound)
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void AbsDiffNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -87,9 +134,13 @@ void AbsDiffNode::update_div_mult_leaves(std::map<std::string, double>& div_mult
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -114,5 +165,12 @@ void AbsDiffNode::set_value(int offset, bool for_comp)
 void AbsDiffNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::abs_diff(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), _feats[1]->test_value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::abs_diff(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        _feats[1]->test_value_ptr(2 * offset + 1, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
index fbf8db07979355b9cc01f310cb474a237bac0c27..1d5a3c6d441a22f88c5a56ace7510c8b93b1cd9c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp
@@ -156,97 +156,103 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "|{} - ({:.10e}*{}{:+15.10e})|",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\left|{} - \\left({:.3e}{}{:+8.3e}\\right)\\right|\\right)",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr_1 = _feats[0]->value_ptr(params, 2);
-            double* val_ptr_2 = _feats[1]->value_ptr(params, 1);
-            std::transform(val_ptr_1, val_ptr_1 + _n_samp, val_ptr_2, dfdp, [params](double vp_1, double vp_2){return -1.0 * util_funcs::sign(vp_1 - (params[0] * vp_2 + params[1]));});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the feature's parameters to the property
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset Key to determine which part of the temporary storage array to look into
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset Key to determine which part of the temporary storage array to look into
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "|{} - ({:.10e}*{}{:+15.10e})|",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (from the root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\left|{} - \\left({:.3e}{}{:+8.3e}\\right)\\right|\\right)",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the non-linear parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr_1 = _feats[0]->value_ptr(params, 2);
+        double* val_ptr_2 = _feats[1]->value_ptr(params, 1);
+        std::transform(
+            val_ptr_1,
+            val_ptr_1 + _n_samp,
+            val_ptr_2,
+            dfdp,
+            [params](double vp_1, double vp_2){return -1.0 * util_funcs::sign(vp_1 - (params[0] * vp_2 + params[1]));}
+        );
+    }
     #endif
 };
 
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
index 20133fbece4e663292338421e3393d42b0a025f7..37bdb49cb49419124b4a181a4fa684bfdab9e16d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.cpp
@@ -2,23 +2,50 @@
 
 BOOST_SERIALIZATION_ASSUME_ABSTRACT(AbsDiffParamNode)
 
-void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer)
+void generateAbsDiffParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat_1,
+    node_ptr feat_2,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+)
 {
     ++feat_ind;
     node_ptr new_feat = std::make_shared<AbsDiffParamNode>(feat_1, feat_2, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     double* params = new_feat->parameters().data();
     double* val_ptr = new_feat->value_ptr();
     const int offset = new_feat->rung();
-    allowed_op_funcs::sub(feat_1->n_samp(), feat_1->value_ptr(params + 2 + feat_1->n_params(), 2 * offset), feat_2->value_ptr(params + 2, 2 * offset + 1), params[0], params[1], val_ptr);
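+    // Evaluate the signed difference f_1 - (alpha * f_2 + beta) first; if its minimum already exceeds
+    // l_bound, the absolute value is redundant and the feature is skipped.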
+    allowed_op_funcs::sub(
+        feat_1->n_samp(),
+        feat_1->value_ptr(params + 2 + feat_1->n_params(), 2 * offset),
+        feat_2->value_ptr(params + 2, 2 * offset + 1),
+        params[0],
+        params[1],
+        val_ptr
+    );
     if(*std::min_element(val_ptr, val_ptr + new_feat->n_samp()) > l_bound)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound) || (util_funcs::max_abs_val<double>(val_ptr, feat_1->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound) ||
+        (util_funcs::max_abs_val<double>(val_ptr, feat_1->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -26,7 +53,9 @@ void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1,
 AbsDiffParamNode::AbsDiffParamNode()
 {}
 
-AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer):
+AbsDiffParamNode::AbsDiffParamNode(
+    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+):
     AbsDiffNode(feat_1, feat_2, feat_ind)
 {
     std::map<std::string, int> add_sub_leaves;
@@ -35,7 +64,9 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned lo
     feat_2->update_add_sub_leaves(add_sub_leaves,-1, expected_abs_tot);
 
     if((add_sub_leaves.size() < 2))
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
@@ -44,12 +75,29 @@ AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned lo
     double* val_ptr = value_ptr();
     const int offset = rung();
 
-    allowed_op_funcs::sub(feat_1->n_samp(), feat_1->value_ptr(params + 2 + feat_1->n_params(), 2 * offset), feat_2->value_ptr(params + 2, 2 * offset + 1), params[0], params[1], val_ptr);
+    allowed_op_funcs::sub(
+        feat_1->n_samp(),
+        feat_1->value_ptr(params + 2 + feat_1->n_params(), 2 * offset),
+        feat_2->value_ptr(params + 2, 2 * offset + 1),
+        params[0],
+        params[1],
+        val_ptr
+    );
     if(*std::min_element(val_ptr, val_ptr + _n_samp) > l_bound)
+    {
         throw InvalidFeatureException();
-
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const()|| (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    }
+
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 AbsDiffParamNode::AbsDiffParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer):
@@ -69,7 +117,9 @@ void AbsDiffParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void AbsDiffNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -77,13 +127,35 @@ void AbsDiffNode::set_value(const double* params, int offset, bool for_comp, int
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->value_ptr(2 * offset + 1);
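+    // Parameter layout is [alpha, beta, feat_2's params..., feat_1's params...], so feat_1's block
+    // starts at params + 2 + _feats[1]->n_params().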
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->value_ptr(2 * offset + 1);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::abs_diff(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
@@ -92,16 +164,37 @@ void AbsDiffNode::set_test_value(const double* params, int offset, bool for_comp
 {
     offset += (offset == -1);
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->test_value_ptr(2 * offset + 1);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->test_value_ptr(2 * offset + 1);
+    }
 
-    allowed_op_funcs::abs_diff(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    allowed_op_funcs::abs_diff(
+        _n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void AbsDiffNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
 {
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
index 0f632e5db8ab7c9bfb92c6369cb0de158fad480c..bf055c2c9f76eb78fb15cc5a6622b20f2199f099 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp
@@ -144,8 +144,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -153,7 +152,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -175,7 +176,9 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[1]->update_postfix(cur_expr, false);
@@ -183,6 +186,14 @@ public:
     }
 };
 
-void generateAbsDiffParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateAbsDiffParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat_1,
+    node_ptr feat_2,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp
index 239730b07ff459425210800566f88b7fae64fdf3..d5fade7b2e82baf203e4bf76a143bc40ec6d75ea 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.cpp
@@ -4,24 +4,42 @@ void generateAddNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr
 {
     ++feat_ind;
     if(feat_1->unit() != feat_2->unit())
+    {
         return;
+    }
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
     feat_1->update_add_sub_leaves(add_sub_leaves, 1, expected_abs_tot);
     feat_2->update_add_sub_leaves(add_sub_leaves, 1, expected_abs_tot);
 
-    if((add_sub_leaves.size() < 2) || (std::abs(std::accumulate(add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);})) != 0))
+    int leaves_v_expected = std::accumulate(
+        add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);}
+    );
+    if((add_sub_leaves.size() < 2) || (std::abs(leaves_v_expected) != 0))
+    {
         return;
+    }
 
     int add_sub_tot_first = std::abs(add_sub_leaves.begin()->second);
-    if((std::abs(add_sub_tot_first) > 1) && std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;}))
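+    // If every leaf appears the same number of times (> 1), the sum is only a multiple of a simpler
+    // feature, so it is rejected.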
+    if(
+        (std::abs(add_sub_tot_first) > 1) &&
+        std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;})
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<AddNode>(feat_1, feat_2, feat_ind);
     double* val_ptr = new_feat->value_ptr();
-    if(new_feat->is_const()  || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
          return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -37,26 +55,46 @@ AddNode::AddNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, d
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     if(_feats[0]->unit() != _feats[1]->unit())
+    {
         throw InvalidFeatureException();
+    }
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
     update_add_sub_leaves(add_sub_leaves, 1, expected_abs_tot);
 
     if((add_sub_leaves.size() < 2))
+    {
         throw InvalidFeatureException();
+    }
 
-    if(std::abs(std::accumulate(add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);})) != 0)
+    int leaves_v_expected = std::accumulate(
+        add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);}
+    );
+    if(std::abs(leaves_v_expected) != 0)
+    {
         throw InvalidFeatureException();
+    }
 
     int add_sub_tot_first = std::abs(add_sub_leaves.begin()->second);
-
-    if((std::abs(add_sub_tot_first) > 1) && std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;}))
+    if(
+        (std::abs(add_sub_tot_first) > 1) &&
+        std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;})
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void AddNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
@@ -69,9 +107,13 @@ void AddNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -96,5 +138,12 @@ void AddNode::set_value(int offset, bool for_comp)
 void AddNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::add(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), _feats[1]->test_value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::add(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        _feats[1]->test_value_ptr(2 * offset + 1, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
index 94ac5d6ab8886d8578f06ee81c7da37eaa5bc869..ded827d535b4ba2a94aad56b7d0837a7360f20e3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp
@@ -153,92 +153,92 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "({} + {:.10e}*{}{:+15.10e})",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left({} + {:.3}*{}{:+8.3e}\\right)",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp){std::fill_n(dfdp,  _n_samp, 1.0);}
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the feature's parameters to the property
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset Key to determine which part of the temporary storage array to look into
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset Key to determine which part of the temporary storage array to look into
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "({} + {:.10e}*{}{:+15.10e})",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (from the root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left({} + {:.3}*{}{:+8.3e}\\right)",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the non-linear parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
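+    // d/d(beta) (f_1 + alpha * f_2 + beta) = 1 for every sample, hence the constant fill.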
+    inline void param_derivative(const double* params, double* dfdp){std::fill_n(dfdp, _n_samp, 1.0);}
     #endif
 };
 void generateAddNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
index 06e2881d83ba2d81c4ad9d2e4c318bb528d16efd..764ec3452e7d0f5e8bb9459eca9eca885f0ccf22 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.cpp
@@ -7,12 +7,21 @@ void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
     ++feat_ind;
     node_ptr new_feat = std::make_shared<AddParamNode>(feat_1, feat_2, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
 
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -20,14 +29,24 @@ void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 AddParamNode::AddParamNode()
 {}
 
-AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+AddParamNode::AddParamNode(
+    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+) :
     AddNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 AddParamNode::AddParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -47,7 +66,9 @@ void AddParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void AddNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -55,13 +76,35 @@ void AddNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->value_ptr(2 * offset + 1);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->value_ptr(2 * offset + 1);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::add(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
@@ -70,10 +113,34 @@ void AddNode::set_test_value(const double* params, int offset, bool for_comp, in
 {
     offset += (offset == -1);
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->test_value_ptr(2 * offset + 1);
-
-    allowed_op_funcs::add(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->test_value_ptr(2 * offset + 1);
+    }
+
+    allowed_op_funcs::add(
+        _n_test_samp,
+        vp_0,
+        vp_1,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void AddNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -82,7 +149,9 @@ void AddNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[1] = 0.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
index f87b3ac15b4fbc082d275221535f06f9b3a2e985..c0732af1bdbae73c1ef1c45296eb9daae2e0e449 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp
@@ -145,8 +145,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -154,7 +153,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -176,7 +177,9 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
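+        // The node token plus its serialized parameters (": p0,p1,...") are
+        // spliced in front of the accumulated postfix string, separated by '|'.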
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[1]->update_postfix(cur_expr, false);
@@ -184,6 +187,14 @@ public:
     }
 };
 
-void generateAddParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateAddParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat_1,
+    node_ptr feat_2,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp
index eafbb70e08a8d106ed46d17a62f4b9d2185b0c7e..44d8b8488771df63a47ac649c5cb0f36bd19ce0b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.cpp
@@ -4,14 +4,21 @@ void generateCbNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned lo
 {
     ++feat_ind;
     if((feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::INV))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<CbNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
     // No is_const check since cube function can only be constant if feat is constant
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
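+    // Reject candidates whose values are non-finite, exceed u_bound in
+    // magnitude, or whose maximum |value| falls below l_bound.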
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,13 +34,21 @@ CbNode::CbNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double
     OperatorNode({feat}, feat_ind)
 {
     if((feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::INV))
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
     // No is_const check since cube function can only be constant if feat is constant
-    if(is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
-
 }
 
@@ -41,9 +56,13 @@ void CbNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, i
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -72,5 +91,11 @@ void CbNode::set_value(int offset, bool for_comp)
 void CbNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::cb(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::cb(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
index afbed63379b7526cadb3d71b2172ca686ba9415f..15a4efa7370eeb7677df729fac35fb44dcf1c57d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp
@@ -143,94 +143,94 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(({:.10e}*{}{:+15.10e})^3)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\left({:.3e}{}{:+8.3e}\\right)^3\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 3.0 * std::pow(params[0] * vp + params[1], 2.0);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(({:.10e}*{}{:+15.10e})^3)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\left({:.3e}{}{:+8.3e}\\right)^3\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
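+        // d/dbeta (alpha*x + beta)^3 = 3*(alpha*x + beta)^2; the alpha
+        // derivative follows by the chain rule (an extra factor of x).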
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 3.0 * std::pow(params[0] * vp + params[1], 2.0);});
+    }
     #endif
 };
 void generateCbNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
index aba464b25ee824f32951819262c4041b94283db3..62a87aa5b0721ab147fd17f3103d4225cd03b00b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.cpp
@@ -7,12 +7,20 @@ void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsign
     ++feat_ind;
     node_ptr new_feat = std::make_shared<CbParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
     // No is_const check since cube function can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -28,8 +36,15 @@ CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, double l_bou
     get_parameters(optimizer);
     set_value();
     // No is_const check since cube function can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan()  || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 CbParamNode::CbParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -51,7 +66,9 @@ void CbParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void CbNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -59,12 +76,25 @@ void CbNode::set_value(const double* params, int offset, bool for_comp, int dept
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
-    allowed_op_funcs::cb(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    allowed_op_funcs::cb(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -72,9 +102,24 @@ void CbNode::set_value(const double* params, int offset, bool for_comp, int dept
 void CbNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
 
-    allowed_op_funcs::cb(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::cb(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void CbNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -83,7 +128,9 @@ void CbNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
index 7efd25e0e86a3ac222fafaf09553a46b931f2576..cebd4920fcca3dea2f26190ed150d941a6650fb6 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp
@@ -100,7 +100,6 @@ public:
      */
     inline void set_test_value(int offset=-1, bool for_comp=false){set_test_value(_params.data(), offset, for_comp);}
 
-
     /**
      * @brief Get the pointer to the feature's training data
      * @details If the feature is not already stored in memory, then calculate the feature and return the pointer to the data
@@ -144,8 +143,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -153,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -175,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateCbParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateCbParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp
index 959fb781d149dc0c974e7661b7f7c235b50c397d..42f8ef1b92c028988d073d0b1a4c63ba7d0e0657 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.cpp
@@ -4,14 +4,21 @@ void generateCbrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned
 {
     ++feat_ind;
     if((feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::INV))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<CbrtNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
     // No is_const check since the cube root function can only be constant if feat is constant
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,26 +34,36 @@ CbrtNode::CbrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, do
     OperatorNode({feat}, feat_ind)
 {
     if((feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::INV))
+    {
         throw InvalidFeatureException();
+    }
 
     double* val_ptr = feat->value_ptr(rung() + 2);
     if(*std::min_element(val_ptr, val_ptr + _n_samp) < 0.0)
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
     // No is_const check since the cube root function can only be constant if feat is constant
     if(is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void CbrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -76,5 +93,11 @@ void CbrtNode::set_value(int offset, bool for_comp)
 void CbrtNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::cbrt(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::cbrt(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
index 17ddb375bb8788c6d5f9d4ba0655491b8bae29d6..ff5c7da95613aa62d90396731dbd80113e2b9b1b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp
@@ -143,93 +143,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(cbrt({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\sqrt[3]{{ {:.3e}{}{:+8.3e} }}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 1.0 / 3.0 * std::pow(params[0] * vp + params[1], -2.0 / 3.0);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(cbrt({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\sqrt[3]{{ {:.3e}{}{:+8.3e} }}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
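+        // d/dbeta cbrt(alpha*x + beta) = (1/3)*(alpha*x + beta)^(-2/3).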
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 1.0 / 3.0 * std::pow(params[0] * vp + params[1], -2.0 / 3.0);});
+    }
     #endif
 };
 void generateCbrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
index 448e505a1413f8ac95a7c974ea6dcf97af2006e2..530ff355bb71015d208c9aae1119bf54c70d07a8 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.cpp
@@ -7,12 +7,20 @@ void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsi
     ++feat_ind;
     node_ptr new_feat = std::make_shared<CbrtParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
     // No is_const check since the cube root function can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -28,8 +36,15 @@ CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, double l
     get_parameters(optimizer);
 
     // No is_const check since the cube root function can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 CbrtParamNode::CbrtParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -61,19 +76,35 @@ void CbrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
         _sign_alpha = 1.0;
     }
     else if(min_res_neg == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
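+    // Note: both signs of the scale parameter are attempted when fitting cbrt;
+    // _sign_alpha records the winning sign so set_bounds can pin alpha to it.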
 }
 
 void CbrtNode::set_value(const double* params, int offset, bool for_comp, int depth)
 {
     bool is_root = (offset == -1);
     offset += is_root;
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::cbrt(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -81,9 +112,23 @@ void CbrtNode::set_value(const double* params, int offset, bool for_comp, int de
 void CbrtNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
 
-    allowed_op_funcs::cbrt(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    allowed_op_funcs::cbrt(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void CbrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -92,7 +137,9 @@ void CbrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
-    _feats[0]->set_bounds(lb + 2, ub + 2);
+    _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
@@ -103,7 +150,9 @@ void CbrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
     ub[0] = _sign_alpha;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
index b809050f9b9924196566fb76a79716b534979d4d..31fe6e61e7ac4d9e5aa23195829e4d3af98cf36e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp
@@ -144,8 +144,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -153,7 +152,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -183,13 +184,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateCbrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateCbrtParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp
index cd3ad91afc3d66433b3ebf50cba18c71561b581e..fe5138ac4bf93ae60317f00ac6aaf625cc33540b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.cpp
@@ -4,13 +4,22 @@ void generateCosNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 {
     ++feat_ind;
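+    // cos is restricted to dimensionless inputs (unit == Unit()) and must not
+    // wrap another trigonometric node.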
     if(feat->unit() != Unit() || (feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<CosNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -25,24 +34,34 @@ CosNode::CosNode(node_ptr feat, unsigned long int feat_ind):
 CosNode::CosNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if(feat->unit() != Unit())
-        throw InvalidFeatureException();
-
-    if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    if(feat->unit() != Unit() || (feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void CosNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -51,9 +70,13 @@ void CosNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -78,5 +101,11 @@ void CosNode::set_value(int offset, bool for_comp)
 void CosNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::cos(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::cos(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
index 595fd72902759722f1d233a92ba4038181b79776..83b5b25735287ea036d79b7020d285480d86f557 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp
@@ -143,93 +143,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(cos({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\cos{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 * std::sin(params[0] * vp + params[1]);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(cos({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\cos{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
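+        // d/dbeta cos(alpha*x + beta) = -sin(alpha*x + beta).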
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 * std::sin(params[0] * vp + params[1]);});
+    }
     #endif
 };
 void generateCosNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
index 57fecc58f1ee0c7d98a5bd00e9f4058b6d4eab4c..ecec61510e3dc3c85960309cb0ef6ca7639c7e98 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.cpp
@@ -7,15 +7,26 @@ void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<CosParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,13 +38,23 @@ CosParamNode::CosParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     CosNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 CosParamNode::CosParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -53,7 +74,9 @@ void CosParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void CosNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -61,12 +84,25 @@ void CosNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
-
     allowed_op_funcs::cos(_n_samp, vp_0, params[0], params[1], val_ptr);
@@ -75,9 +111,24 @@ void CosNode::set_value(const double* params, int offset, bool for_comp, int dep
 void CosNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
 
-    allowed_op_funcs::cos(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::cos(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void CosNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -85,5 +136,10 @@ void CosNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     lb[1] = -1.0 * M_PI;
     ub[1] = M_PI;
 
+    if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        return;
+    }
+
-    _feats[0]->set_bounds(lb + 2, ub + 2);
+    _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
index 6c17dbab6b78def5affc9b1ed1c76a7d0770b7e7..afb2b3b996c83320a80f644e9ef6707e4d15bc77 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp
@@ -144,8 +144,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -153,7 +152,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -175,13 +176,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateCosParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateCosParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp
index 3ad216d82b4f67d8f073842f46f6433bdb26d94c..79b773be7cc0c270bd78881eb0f2964db5280112 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.cpp
@@ -4,24 +4,46 @@ void generateDivNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr
 {
     ++feat_ind;
     if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
+    {
         return;
+    }
 
     std::map<std::string, double> div_mult_leaves;
     double expected_abs_tot = 0.0;
     feat_1->update_div_mult_leaves(div_mult_leaves, 1.0, expected_abs_tot);
     feat_2->update_div_mult_leaves(div_mult_leaves, -1.0, expected_abs_tot);
 
-    if((div_mult_leaves.size() < 2) || (std::abs(std::accumulate(div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);})) > 1e-12))
+    double leaves_v_expected = std::accumulate(
+        div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);}
+    );
+    if(
+        (div_mult_leaves.size() < 2) ||
+        (std::abs(leaves_v_expected) > 1e-12)
+    )
+    {
         return;
+    }
 
     double div_mult_tot_first = div_mult_leaves.begin()->second;
-    if((std::abs(div_mult_tot_first) != 1.0) && std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return el.second == div_mult_tot_first;}))
+    if(
+        (std::abs(div_mult_tot_first) != 1.0) &&
+        std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return el.second == div_mult_tot_first;})
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<DivNode>(feat_1, feat_2, feat_ind);
     double* val_ptr = new_feat->value_ptr();
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -37,26 +59,45 @@ DivNode::DivNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, d
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     if((_feats[0]->type() == NODE_TYPE::INV) || (_feats[1]->type() == NODE_TYPE::INV) || (_feats[1]->type() == NODE_TYPE::DIV))
+    {
         throw InvalidFeatureException();
+    }
 
     std::map<std::string, double> div_mult_leaves;
     double expected_abs_tot = 0.0;
     update_div_mult_leaves(div_mult_leaves, 1.0, expected_abs_tot);
 
-    if((div_mult_leaves.size() < 2))
-        throw InvalidFeatureException();
+    double leaves_v_expected = std::accumulate(
+        div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);}
+    );
 
-    if(std::abs(std::accumulate(div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);})) > 1e-12)
+    if(
+        (div_mult_leaves.size() < 2) ||
+        (std::abs(leaves_v_expected) > 1e-12)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     double div_mult_tot_first = std::abs(div_mult_leaves.begin()->second);
-
-    if((std::abs(div_mult_tot_first) != 1.0) && std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return std::abs(el.second) == div_mult_tot_first;}))
+    if(
+        (std::abs(div_mult_tot_first) != 1.0) &&
+        std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return std::abs(el.second) == div_mult_tot_first;})
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
 }
 
@@ -64,9 +105,13 @@ void DivNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -97,5 +142,12 @@ void DivNode::set_value(int offset, bool for_comp)
 void DivNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::div(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), _feats[1]->test_value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::div(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        _feats[1]->test_value_ptr(2 * offset + 1, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
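
For reference, a self-contained worked example of the cancellation check shared by generateDivNode and the DivNode constructor above. It assumes, as the update_div_mult_leaves implementations in this patch do, that product and quotient nodes forward fact (sign-flipped for denominators) down to their primary-feature leaves; the candidate (a*b)/(b*c) is rejected because the shared factor b cancels:

    #include <cmath>
    #include <map>
    #include <numeric>
    #include <string>

    int main()
    {
        std::map<std::string, double> leaves;
        double expected_abs_tot = 0.0;

        // Numerator a*b contributes fact = +1 per leaf, denominator b*c fact = -1.
        for(const auto* leaf : {"a", "b"}) { leaves[leaf] += 1.0; expected_abs_tot += 1.0; }
        for(const auto* leaf : {"b", "c"}) { leaves[leaf] -= 1.0; expected_abs_tot += 1.0; }

        // Net factors: a -> +1, b -> 0, c -> -1, so sum(|net|) = 2 but expected = 4.
        double leaves_v_expected = std::accumulate(
            leaves.begin(), leaves.end(), -1.0 * expected_abs_tot,
            [](double tot, auto el){ return tot + std::abs(el.second); }
        );
        return std::abs(leaves_v_expected) > 1e-12; // nonzero: candidate rejected
    }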
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
index a44eb19a4ba0f91d06a0f1abf41d809f623626d4..17d0bde8f3e034f86e0f2b6ab2c71a7f3e32d626 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp
@@ -153,97 +153,103 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "({} / ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\frac{{ {} }}{{ {:.3e}*{}{:+8.3e} }} \\right)",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr_1 = _feats[0]->value_ptr(params, 2);
-            double* val_ptr_2 = _feats[1]->value_ptr(params, 1);
-            std::transform(val_ptr_1, val_ptr_1 + _n_samp, val_ptr_2, dfdp, [params](double vp_1, double vp_2){return -1.0 * vp_1 / std::pow(params[0] * vp_2 + params[1], 2.0);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "({} / ({:.10e}*{}{:+15.10e}))",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\frac{{ {} }}{{ {:.3e}*{}{:+8.3e} }} \\right)",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr_1 = _feats[0]->value_ptr(params, 2);
+        double* val_ptr_2 = _feats[1]->value_ptr(params, 1);
+        std::transform(
+            val_ptr_1,
+            val_ptr_1 + _n_samp,
+            val_ptr_2,
+            dfdp,
+            [params](double vp_1, double vp_2){return -1.0 * vp_1 / std::pow(params[0] * vp_2 + params[1], 2.0);}
+        );
+    }
     #endif
 };
 void generateDivNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
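
For reference, the column stored by param_derivative above is the bias derivative of the parameterized quotient f = v_1 / (\alpha v_2 + \beta):

    \frac{\partial f}{\partial \beta} = -\frac{v_1}{(\alpha v_2 + \beta)^2},
    \qquad
    \frac{\partial f}{\partial \alpha} = -\frac{v_1 v_2}{(\alpha v_2 + \beta)^2} = v_2 \, \frac{\partial f}{\partial \beta}

A single stored column therefore suffices if the caller recovers the scale derivative by multiplying with the child value; whether NLOptimizer does so is outside this patch.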
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
index b15310cc32ae6c8f854b301255742664a39980ae..75804576be32cc5aae9f22569a7ddbeeb8836b17 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.cpp
@@ -6,15 +6,26 @@ void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 {
     ++feat_ind;
     if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<DivParamNode>(feat_1, feat_2, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -22,25 +33,36 @@ void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 DivParamNode::DivParamNode()
 {}
 
-DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+DivParamNode::DivParamNode(
+    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+) :
     DivNode(feat_1, feat_2, feat_ind)
 {
-     if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
+    if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::DIV))
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
     set_value();
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 DivParamNode::DivParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
     DivNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
-
     get_parameters(optimizer);
 }
 
@@ -54,7 +76,9 @@ void DivParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this, rung() > 1);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void DivNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -62,13 +86,35 @@ void DivNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->value_ptr(2 * offset + 1);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->value_ptr(params + 2, 2 * offset + 1, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->value_ptr(2 * offset + 1);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::div(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
@@ -77,10 +123,33 @@ void DivNode::set_test_value(const double* params, int offset, bool for_comp, in
 {
     offset += (offset == -1);
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->test_value_ptr(2 * offset + 1);
-
-    allowed_op_funcs::div(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->test_value_ptr(2 * offset + 1);
+    }
+    allowed_op_funcs::div(
+        _n_test_samp,
+        vp_0,
+        vp_1,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void DivNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -89,7 +158,9 @@ void DivNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
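
The pointer offsets in DivNode::set_value, set_test_value, and set_bounds above imply one consistent packing of the parameter and bounds arrays. The layout below is inferred from this patch, not an authoritative spec:

    // Inferred layout, with n0 = _feats[0]->n_params(), n1 = _feats[1]->n_params():
    //   params[0]                   alpha, the scale applied to the denominator
    //   params[1]                   beta, the shift applied to the denominator
    //   params[2 .. 2+n1-1]         parameters of _feats[1] (denominator subtree)
    //   params[2+n1 .. 2+n1+n0-1]   parameters of _feats[0] (numerator subtree)
    // set_bounds fills lb/ub in the same order, which is why the numerator's
    // bounds start at lb + 2 + _feats[1]->n_params().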
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
index 23608322ebb5bb67df4b683ed82ec1c158e4f2bf..b514c2127b8c1d792dd9318b58db46c9bd5aa7dc 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp
@@ -145,8 +145,7 @@ public:
      *
      * @param optimizer The optimizer used to fit the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -154,7 +153,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -176,7 +177,9 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[1]->update_postfix(cur_expr, false);
@@ -184,6 +187,14 @@ public:
     }
 };
 
-void generateDivParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateDivParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat_1,
+    node_ptr feat_2,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp
index 9de388884d3f9d7a9010cc205b5abfce0e1ab2ce..5167c99dfbb9642716481ac923f8e66d52274dd9 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.cpp
@@ -3,15 +3,29 @@
 void generateExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if((feat->unit() != Unit()) || (feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<ExpNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
     // No is_const check since the exponential can only be constant if feat is constant
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
+        return;
+    }
     feat_list.push_back(new_feat);
 }
 
@@ -25,17 +39,29 @@ ExpNode::ExpNode(node_ptr feat, unsigned long int feat_ind):
 ExpNode::ExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if(feat->unit() != Unit())
-        throw InvalidFeatureException();
-
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
     // No is_const check since the exponential can only be constant if feat is constant
-    if(is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_test_value();
 }
@@ -44,9 +70,13 @@ void ExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -55,9 +85,13 @@ void ExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -82,5 +116,11 @@ void ExpNode::set_value(int offset, bool for_comp)
 void ExpNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::exp(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::exp(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
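
The NODE_TYPE guards in generateExpNode and the ExpNode constructor above drop compositions that the scale/shift parameters and other operators already cover; this is a reading of the guards, which the patch itself leaves undocumented:

    \exp(a \pm b) = \exp(a)\,\exp(\pm b)   (redundant with products/quotients of exponentials)
    \exp(\log x) = x
    \exp(\exp x),\; \exp(e^{-x})           (nested exponentials, excluded by convention)

The unit() != Unit() check additionally restricts the exponential to dimensionless arguments.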
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
index e507d108cd95b04f80fc6738a0e02448c2902593..69fd00b63b989dc4fd0cf3effd09c5987b9c451a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp
@@ -143,93 +143,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(exp({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\exp{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return std::exp(params[0] * vp + params[1]);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(exp({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\exp{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return std::exp(params[0] * vp + params[1]);});
+    }
 
     #endif
 };
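
As with the division node, param_derivative above stores the bias derivative; for f = \exp(\alpha x + \beta) both derivatives are multiples of the value itself:

    \frac{\partial f}{\partial \beta} = \exp(\alpha x + \beta) = f,
    \qquad
    \frac{\partial f}{\partial \alpha} = x \, f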
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
index bb1e05069a2572682296d5e071e59444c22fdc63..d9b16328619c2650a0d82242c4c2177e4d1d4884 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.cpp
@@ -6,17 +6,33 @@ void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 {
     ++feat_ind;
 
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<ExpParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
     // No is_const check since the exponential can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,15 +43,30 @@ ExpParamNode::ExpParamNode()
 ExpParamNode::ExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     ExpNode(feat, feat_ind)
 {
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
     // No is_const check since the exponential can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 ExpParamNode::ExpParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -55,7 +86,9 @@ void ExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this, true);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void ExpNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -63,22 +96,50 @@ void ExpNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
-    allowed_op_funcs::exp(_n_samp, vp_0, params[0], params[1], node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    allowed_op_funcs::exp(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
 
 void ExpNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
 
-    allowed_op_funcs::exp(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::exp(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void ExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -89,7 +150,9 @@ void ExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(ub - from_parent) = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
index 2b150202d69640407df3becfa1abda647232f34f..3441efc5eeb5a1d18dedc9cac2817916776a9ec5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp
@@ -143,8 +143,7 @@ public:
      *
      * @param optimizer The optimizer used to fit the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateExpParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp
index 56609e616ef73b8231abc6424878a4eb306d9b03..851742d9a9d19aea125064738f8e97c46791bbac 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.cpp
@@ -5,14 +5,21 @@ void generateInvNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned l
 
     ++feat_ind;
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::INV))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<InvNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
     // No is_const check since the inverse can only be constant if feat is constant
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
-         return;
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
+        return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -28,13 +35,21 @@ InvNode::InvNode(node_ptr feat, unsigned long int feat_ind, double l_bound, doub
     OperatorNode({feat}, feat_ind)
 {
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::INV))
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
     // No is_const check since the inverse can only be constant if feat is constant
-    if(is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
 }
 
@@ -42,9 +57,13 @@ void InvNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -74,5 +93,11 @@ void InvNode::set_value(int offset, bool for_comp)
 void InvNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::inv(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::inv(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
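
The type guards in generateInvNode and the InvNode constructor above reject inverses that existing node types already reach:

    \frac{1}{1/x} = x, \qquad
    \frac{1}{a/b} = \frac{b}{a}, \qquad
    \frac{1}{\exp(x)} = \exp(-x), \qquad
    \frac{1}{\exp(-x)} = \exp(x)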
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
index 46af4b0b477e933aadf355dd2d1748d2342142a5..9e6f829c870fb3bfb575f67cfde4a4ce0451df4f 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp
@@ -139,93 +139,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(1.0 / ({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\frac{{1}}{{ {:.3e}{}{:+8.3e} }}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 / std::pow(params[0] * vp + params[1], 2.0);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer The optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(1.0 / ({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\frac{{1}}{{ {:.3e}{}{:+8.3e} }}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 / std::pow(params[0] * vp + params[1], 2.0);});
+    }
     #endif
 };
 void generateInvNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
index c16d266fd719adddb275888d29216c0dbaf4fc61..0c7a960507a31e83d312a0a49a48555f2115a310 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.cpp
@@ -7,16 +7,26 @@ void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<InvParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
     // No is_const check since the inverse function can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -28,14 +38,23 @@ InvParamNode::InvParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     InvNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV))
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
     // No is_const check since the inverse function can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 InvParamNode::InvParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -55,7 +74,9 @@ void InvParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this, rung() > 1);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void InvNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -63,12 +84,25 @@ void InvNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
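+    // An offset of -1 marks the root call; shift it to 0 so the storage-array lookups below are valid.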
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+    double* vp_0;
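+    // Past MAX_PARAM_DEPTH the parameter chain is truncated and the child's stored (unparameterized) values are used.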
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
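+    // A selected feature at the root of the call writes directly into the descriptor matrix; all other cases use the temporary value arrays.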
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::inv(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -76,7 +110,16 @@ void InvNode::set_value(const double* params, int offset, bool for_comp, int dep
 void InvNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
+
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
 
     allowed_op_funcs::inv(_n_test_samp, vp_0, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
 }
@@ -87,7 +130,9 @@ void InvNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
index 777ea7a052840ce86f19b7be92db3d32feb458de..919ab4aaa14aab5c7a665bd4e04df411e714c70c 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp
@@ -143,8 +143,7 @@ public:
      *
      * @param optimizer the optimizer used to fit the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateInvParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateInvParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp
index 77d6d7ad4bfcad30bde9f24c8d6671d92f4a4d88..7590e8b6fda043c5f803ce391a4192f65a04d63a 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.cpp
@@ -3,15 +3,35 @@
 void generateLogNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if(feat->unit() != Unit() || (feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::DIV) ||
+        (feat->type() == NODE_TYPE::INV) ||
+        (feat->type() == NODE_TYPE::MULT) ||
+        (feat->type() == NODE_TYPE::LOG) ||
+        (feat->type() == NODE_TYPE::SIX_POW) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::SQRT)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<LogNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
     // No is_const check since the log function can only be constant if feat is constant
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -26,17 +46,35 @@ LogNode::LogNode(node_ptr feat, unsigned long int feat_ind):
 LogNode::LogNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if(feat->unit() != Unit())
-        throw InvalidFeatureException();
-
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::DIV) ||
+        (feat->type() == NODE_TYPE::INV) ||
+        (feat->type() == NODE_TYPE::MULT) ||
+        (feat->type() == NODE_TYPE::LOG) ||
+        (feat->type() == NODE_TYPE::SIX_POW) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::SQRT)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
     // No is_const check since the log function can only be constant if feat is constant
-    if(is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_test_value();
 }
@@ -45,9 +83,13 @@ void LogNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -56,9 +98,13 @@ void LogNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -83,5 +129,11 @@ void LogNode::set_value(int offset, bool for_comp)
 void LogNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::log(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::log(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
index 000132397925a058b0e43e406fe62f3a6c1f6ccc..3498b292c63a038fcb36ebc013516d4e742ae6db 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp
@@ -143,93 +143,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(ln({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\ln{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 1.0 / (params[0] * vp + params[1]);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer the optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset key to determine which part of the temporary storage array to look into
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset key to determine which part of the temporary storage array to look into
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(ln({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\ln{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
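+        // For f = ln(p0*v + p1) the derivative with respect to the inner argument (p0*v + p1) is 1/(p0*v + p1).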
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 1.0 / (params[0] * vp + params[1]);});
+    }
     #endif
 };
 void generateLogNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
index afdace8e1efbc1c4cbaac49a1b3cc80babe090f8..ca296469c813fa9cfeb00671f9b63d18aebefe0e 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.cpp
@@ -6,17 +6,39 @@ void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
 {
     ++feat_ind;
 
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
+    if(
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::DIV) ||
+        (feat->type() == NODE_TYPE::INV) ||
+        (feat->type() == NODE_TYPE::MULT) ||
+        (feat->type() == NODE_TYPE::LOG) ||
+        (feat->type() == NODE_TYPE::SIX_POW) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::SQRT)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<LogParamNode>(feat, feat_ind, optimizer);
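+    // get_parameters() zeroes _params[0] when the optimization fails, so a near-zero scale parameter marks an invalid fit.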
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
     // No is_const check since the log function can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,15 +49,36 @@ LogParamNode::LogParamNode()
 LogParamNode::LogParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     LogNode(feat, feat_ind)
 {
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::DIV) || (feat->type() == NODE_TYPE::INV) || (feat->type() == NODE_TYPE::MULT) || (feat->type() == NODE_TYPE::LOG) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT))
+    if(
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::DIV) ||
+        (feat->type() == NODE_TYPE::INV) ||
+        (feat->type() == NODE_TYPE::MULT) ||
+        (feat->type() == NODE_TYPE::LOG) ||
+        (feat->type() == NODE_TYPE::SIX_POW) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::SQRT)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
     // No is_const check since the log function can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 LogParamNode::LogParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -55,19 +98,35 @@ void LogParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void LogNode::set_value(const double* params, int offset, bool for_comp, int depth)
 {
     bool is_root = (offset == -1);
     offset += is_root;
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::log(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -75,9 +134,23 @@ void LogNode::set_value(const double* params, int offset, bool for_comp, int dep
 void LogNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-
-    allowed_op_funcs::log(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::log(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void LogNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -86,7 +159,9 @@ void LogNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(ub - from_parent + 1) = 0.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
index 1a746c35e2588a6744d5a0817476bfd9ed3102a7..2c6857d491f79fdf93de4c9e11c16804986359de 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp
@@ -143,8 +143,7 @@ public:
      *
      * @param optimizer the optimizer used to fit the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateLogParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateLogParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp
index 425417623d25195ba140401919edf1fd61bedd1c..78cf671874ccdbbe9a2cd0ddfcf796582b8c25ca 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.cpp
@@ -3,26 +3,52 @@
 void generateMultNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if((feat_1->type() == NODE_TYPE::INV) || (feat_2->type() == NODE_TYPE::INV) || ((feat_1->type() == NODE_TYPE::DIV) && (feat_2->type() == NODE_TYPE::DIV)))
+    if(
+        (feat_1->type() == NODE_TYPE::INV) ||
+        (feat_2->type() == NODE_TYPE::INV) ||
+        ((feat_1->type() == NODE_TYPE::DIV) && (feat_2->type() == NODE_TYPE::DIV))
+    )
+    {
         return;
+    }
 
     std::map<std::string, double> div_mult_leaves;
     double expected_abs_tot = 0.0;
     feat_1->update_div_mult_leaves(div_mult_leaves, 1.0, expected_abs_tot);
     feat_2->update_div_mult_leaves(div_mult_leaves, 1.0, expected_abs_tot);
 
-    if((div_mult_leaves.size() < 2) || (std::abs(std::accumulate(div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);})) > 1e-12))
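+    // Residual between the net leaf factors and the expected absolute total; a nonzero value means factors cancelled and the product simplifies.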
+    double leaves_v_expected = std::accumulate(
+        div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);}
+    );
+    if(
+        (div_mult_leaves.size() < 2) ||
+        (std::abs(leaves_v_expected) > 1e-12)
+    )
+    {
         return;
+    }
 
     double div_mult_tot_first = std::abs(div_mult_leaves.begin()->second);
 
-    if((std::abs(div_mult_tot_first) - 1.0 > 1e-12) && std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return std::abs(el.second) == div_mult_tot_first;}))
+    if(
+        (std::abs(div_mult_tot_first) - 1.0 > 1e-12) &&
+        std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return std::abs(el.second) == div_mult_tot_first;})
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<MultNode>(feat_1, feat_2, feat_ind);
     double* val_ptr = new_feat->value_ptr();
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -37,36 +63,64 @@ MultNode::MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind)
 MultNode::MultNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
-    if((_feats[0]->type() == NODE_TYPE::INV) || (_feats[1]->type() == NODE_TYPE::INV) || ((_feats[0]->type() == NODE_TYPE::DIV) && (_feats[1]->type() == NODE_TYPE::DIV)))
+    if(
+        (_feats[0]->type() == NODE_TYPE::INV) ||
+        (_feats[1]->type() == NODE_TYPE::INV) ||
+        ((_feats[0]->type() == NODE_TYPE::DIV) && (_feats[1]->type() == NODE_TYPE::DIV))
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     std::map<std::string, double> div_mult_leaves;
     double expected_abs_tot = 0.0;
     update_div_mult_leaves(div_mult_leaves, 1.0, expected_abs_tot);
 
-    if((div_mult_leaves.size() < 2))
-        throw InvalidFeatureException();
+    double leaves_v_expected = std::accumulate(
+        div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);}
+    );
 
-    if(std::abs(std::accumulate(div_mult_leaves.begin(), div_mult_leaves.end(), -1.0*expected_abs_tot, [](double tot, auto el){return tot + std::abs(el.second);})) > 1e-12)
+    if(
+        (div_mult_leaves.size() < 2) ||
+        (std::abs(leaves_v_expected) > 1e-12)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     double div_mult_tot_first = std::abs(div_mult_leaves.begin()->second);
 
-    if((std::abs(div_mult_tot_first) - 1.0 > 1e-12) && std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return std::abs(el.second) == div_mult_tot_first;}))
+    if(
+        (std::abs(div_mult_tot_first) - 1.0 > 1e-12) &&
+        std::all_of(div_mult_leaves.begin(), div_mult_leaves.end(), [&div_mult_tot_first](auto el){return std::abs(el.second) == div_mult_tot_first;})
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void MultNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -97,5 +151,12 @@ void MultNode::set_value(int offset, bool for_comp)
 void MultNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::mult(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), _feats[1]->test_value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::mult(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        _feats[1]->test_value_ptr(2 * offset + 1, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
index 90008f984c965373708df906bd690373c9c70a71..dcc841c8e7981ccfd3931e10d955efb806b0c4a2 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp
@@ -154,92 +154,92 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "({} * ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left({} * \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp){std::copy_n(_feats[0]->value_ptr(params, 2),  _n_samp, dfdp);}
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer the optimizer used to fit the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset key to determine which part of the temporary storage array to look into
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param params pointer to the parameter values
+     * @param offset key to determine which part of the temporary storage array to look into
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "({} * ({:.10e}*{}{:+15.10e}))",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left({} * \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
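+    // For f = feat_0 * (p0*feat_1 + p1) the derivative with respect to (p0*feat_1 + p1) is feat_0, so dfdp is a copy of feat_0's values.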
+    inline void param_derivative(const double* params, double* dfdp){std::copy_n(_feats[0]->value_ptr(params, 2),  _n_samp, dfdp);}
     #endif
 };
 void generateMultNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
index c708954d6daab771ab4ac8c8665e7c729d1fc84c..ae7c771dcfd6571b2345859988df6feb37c046a8 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.cpp
@@ -7,11 +7,20 @@ void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, no
     ++feat_ind;
     node_ptr new_feat = std::make_shared<MultParamNode>(feat_1, feat_2, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -25,8 +34,16 @@ MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 MultParamNode::MultParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound):
@@ -46,7 +63,9 @@ void MultParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void MultNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -54,13 +73,35 @@ void MultNode::set_value(const double* params, int offset, bool for_comp, int de
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->value_ptr(2 * offset + 1);
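+    // Parameter layout is [p0, p1, feat_1's params..., feat_0's params...], so feat_0's slice starts after feat_1's block.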
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->value_ptr(params + 2, 2 * offset + 1, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->value_ptr(2 * offset + 1);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::mult(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
@@ -69,10 +110,34 @@ void MultNode::set_test_value(const double* params, int offset, bool for_comp, i
 {
     offset += (offset == -1);
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->test_value_ptr(2 * offset + 1);
-
-    allowed_op_funcs::mult(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->test_value_ptr(2 * offset + 1);
+    }
+
+    allowed_op_funcs::mult(
+        _n_test_samp,
+        vp_0,
+        vp_1,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void MultNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -81,7 +146,9 @@ void MultNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
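+    // The children's bounds follow the parameter layout: feat_1's block starts at lb/ub + 2 and feat_0's after feat_1's parameters.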
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
index d6b072800e306ae5b7813299b2d0ef3fead08911..1a81b1109c5cde97a9be816f484129b80f4cd189 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp
@@ -143,8 +143,7 @@ public:
      *
      * @param optimizer the optimizer used to fit the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,7 +175,9 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[1]->update_postfix(cur_expr, false);
@@ -182,6 +185,14 @@ public:
     }
 };
 
-void generateMultParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateMultParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat_1,
+    node_ptr feat_2,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp
index d627225afdc266457409e7bddd98e8b3fff8c8bc..f98c2820894d2f117c13fa0a1f4f76b5ca4e34af 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.cpp
@@ -3,15 +3,29 @@
 void generateNegExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if(feat->unit() != Unit() || (feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<NegExpNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
     // No is_const check since the negative exponential can only be constant if feat is constant
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -26,26 +40,42 @@ NegExpNode::NegExpNode(node_ptr feat, unsigned long int feat_ind):
 NegExpNode::NegExpNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if(feat->unit() != Unit())
-        throw InvalidFeatureException();
-
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
     // No is_const check since the negative exponential can only be constant if feat is constant
-    if(is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void NegExpNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -54,9 +84,13 @@ void NegExpNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_
 {
     std::string key = "exp(" + _feats[0]->expr() + ")";
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] -= fact;
+    }
     else
+    {
         div_mult_leaves[key] = -1.0 * fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -81,5 +115,11 @@ void NegExpNode::set_value(int offset, bool for_comp)
 void NegExpNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::neg_exp(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::neg_exp(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
index 9330f6c5cb4b6fc34c95f725ebedd2824fb20df3..cdc3d23e18ad873d2f933cd1020d492371484019 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp
@@ -144,93 +144,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(exp(-1.0 * {:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\exp{{ \\left(-\\left({:.3e}{}{:+8.3e} \\right)\\right)}}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 * std::exp(-1.0 * (params[0] * vp + params[1]));});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer optimizer used to find the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
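+     * @param depth the current depth of the node on the Binary expression tree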
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(exp(-1.0 * {:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\exp{{ \\left(-\\left({:.3e}{}{:+8.3e} \\right)\\right)}}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
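+        // Math note: for f = exp(-(a*x + b)), df/db = -exp(-(a*x + b)); df/da is x times this value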
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return -1.0 * std::exp(-1.0 * (params[0] * vp + params[1]));});
+    }
     #endif
 };
 void generateNegExpNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
index fcd3cd37d8c46df0faec8db4dfe58e6f9582ad1b..30a954f25b10154b70dd37e6c1eda5a2ec305e04 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.cpp
@@ -6,17 +6,33 @@ void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, un
 {
     ++feat_ind;
 
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
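+    // Skip redundant compositions: exp(-(a+b)) factors into a product of exponentials and exp(-log(x)) = 1/x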
+    if(
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<NegExpParamNode>(feat, feat_ind, optimizer);
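+    // A fitted scale of ~0 means the optimization failed or the feature collapsed to a constant, so drop it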
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    // No is_const check since cube function can only be constant if feat is constant
+    // No is_const check since the negative exponential can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,15 +43,30 @@ NegExpParamNode::NegExpParamNode()
 NegExpParamNode::NegExpParamNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
     NegExpNode(feat, feat_ind)
 {
-    if((feat->type() == NODE_TYPE::NEG_EXP) || (feat->type() == NODE_TYPE::EXP) || (feat->type() == NODE_TYPE::ADD) || (feat->type() == NODE_TYPE::SUB) || (feat->type() == NODE_TYPE::LOG))
+    if(
+        (feat->type() == NODE_TYPE::NEG_EXP) ||
+        (feat->type() == NODE_TYPE::EXP) ||
+        (feat->type() == NODE_TYPE::ADD) ||
+        (feat->type() == NODE_TYPE::SUB) ||
+        (feat->type() == NODE_TYPE::LOG)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(),  0.0);
     get_parameters(optimizer);
 
-    // No is_const check since cube function can only be constant if feat is constant
+    // No is_const check since the negative exponential can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 NegExpParamNode::NegExpParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -55,7 +86,9 @@ void NegExpParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this, true);
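+    // An infinite residual signals a failed fit; zero the scale so callers screening on |params[0]| discard the feature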
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void NegExpNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -63,12 +96,25 @@ void NegExpNode::set_value(const double* params, int offset, bool for_comp, int
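+    // offset == -1 marks a root-level call; map it to 0 before indexing the temporary storage arrays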
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
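+    // Beyond MAX_PARAM_DEPTH the child subtree is evaluated without passing parameters further down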
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::neg_exp(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -76,9 +122,23 @@ void NegExpNode::set_value(const double* params, int offset, bool for_comp, int
 void NegExpNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-
-    allowed_op_funcs::neg_exp(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::neg_exp(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void NegExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -89,7 +149,9 @@ void NegExpNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     *(ub - from_parent) = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
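+    // Each node consumes two parameter slots (scale and shift), so the child's bounds start two entries later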
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
index bd0d1999148fc08532de4f856734536412a4a21d..dc6fdd1b5a93666626ae921e1ddf18b35cc448c7 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp
@@ -143,8 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer optimizer used to find the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateNegExpParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateNegExpParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
index 061a1e2d9ba19b1e034d50bf3935fe340cb6c9ed..feee393461738b4cad08f2e82165db3727bd91e8 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.cpp
@@ -7,15 +7,26 @@ void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsig
     ++feat_ind;
 
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SinParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -27,13 +38,23 @@ SinParamNode::SinParamNode(node_ptr feat, unsigned long int feat_ind, double l_b
     SinNode(feat, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    {
         throw InvalidFeatureException();
+    }
 
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 SinParamNode::SinParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -53,7 +74,9 @@ void SinParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void SinNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -61,12 +84,25 @@ void SinNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::sin(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -74,9 +110,23 @@ void SinNode::set_value(const double* params, int offset, bool for_comp, int dep
 void SinNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-
-    allowed_op_funcs::sin(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::sin(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void SinNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -85,7 +135,9 @@ void SinNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
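+    // sin is 2*pi-periodic, so the shift parameter is capped at pi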
     ub[1] = M_PI;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
index ef6724f6ff3e354bf29b40ae18cd007ec86a4125..d289ce4577837e4bb6713933e850beea779e1db1 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp
@@ -143,8 +143,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer optimizer used to find the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateSinParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateSinParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp
index c4d1ac25a6a73e2f0e2c96b0260cdb3a70513178..85af69dfa23a0508f152a51219238e5665025800 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.cpp
@@ -3,14 +3,26 @@
 void generateSinNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if(feat->unit() != Unit() || (feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::SIN) ||
+        (feat->type() == NODE_TYPE::COS)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SinNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
-         return;
+        return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -25,25 +37,35 @@ SinNode::SinNode(node_ptr feat, unsigned long int feat_ind):
 SinNode::SinNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if(feat->unit() != Unit())
-        throw InvalidFeatureException();
-
-    if((feat->type() == NODE_TYPE::SIN) || (feat->type() == NODE_TYPE::COS))
+    if(
+        (feat->unit() != Unit()) ||
+        (feat->type() == NODE_TYPE::SIN) ||
+        (feat->type() == NODE_TYPE::COS)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
 
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void SinNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -52,9 +74,13 @@ void SinNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -79,5 +105,11 @@ void SinNode::set_value(int offset, bool for_comp)
 void SinNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::sin(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::sin(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
index 8667bc3b393a692019c3d3fd4e5124e6e34e3412..ebd75f2580f4dafae451713bfe09cd324b0ae331 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp
@@ -144,92 +144,92 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(sin({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\sin{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr( params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return std::cos(params[0] * vp + params[1]);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer optimizer used to find the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
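+     * @param depth the current depth of the node on the Binary expression tree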
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(sin({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\sin{{ \\left({:.3e}{}{:+8.3e} \\right)}}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
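+        // Math note: for f = sin(a*x + b), df/db = cos(a*x + b); df/da is x times this value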
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return std::cos(params[0] * vp + params[1]);});
+    }
     #endif
 };
 void generateSinNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
index 1b5692dd4db0034db58471bd53439dc356bca7bd..6195650d9ec8880e19079e042da7437f05ff15c6 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.cpp
@@ -7,11 +7,20 @@ void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, un
     ++feat_ind;
     node_ptr new_feat = std::make_shared<SixPowParamNode>(feat, feat_ind, optimizer);
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -25,8 +34,16 @@ SixPowParamNode::SixPowParamNode(node_ptr feat, unsigned long int feat_ind, doub
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 SixPowParamNode::SixPowParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -46,19 +63,35 @@ void SixPowParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this, true);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void SixPowNode::set_value(const double* params, int offset, bool for_comp, int depth)
 {
     bool is_root = (offset == -1);
     offset += is_root;
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::sixth_pow(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -66,9 +99,23 @@ void SixPowNode::set_value(const double* params, int offset, bool for_comp, int
 void SixPowNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-
-    allowed_op_funcs::sixth_pow(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::sixth_pow(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void SixPowNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -77,7 +124,9 @@ void SixPowNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
index 7def292971bb1fc858ceba02ac56710888178c76..93d0b480351c376f9139bd2c32fa253bd1495b5d 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp
@@ -144,8 +144,7 @@ public:
      *
-     * @param prop property to fit to get the parameters
+     * @param optimizer optimizer used to find the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -153,7 +152,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -175,13 +176,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateSixPowParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateSixPowParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp
index 1d3ac8aec151ada7280724a4092f435b93d79002..ef588df3a8a84e0c012185c41dfdeea3a37bda0b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.cpp
@@ -3,14 +3,28 @@
 void generateSixPowNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if((feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::INV))
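+    // Sixth powers of root, power, and inverse features duplicate simpler power features, e.g. (x^(1/3))^6 = x^2 and (x^(1/2))^6 = x^3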
+    if(
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::SQRT) ||
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::INV)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SixPowNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return (!std::isfinite(d)) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return (!std::isfinite(d)) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -25,21 +39,40 @@ SixPowNode::SixPowNode(node_ptr feat, unsigned long int feat_ind):
 SixPowNode::SixPowNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if((feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::SQRT) || (feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::INV))
+    if(
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::SQRT) ||
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::INV)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void SixPowNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -69,5 +102,11 @@ void SixPowNode::set_value(int offset, bool for_comp)
 void SixPowNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::sixth_pow(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::sixth_pow(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
index c51aebfaefaea1d8e72fd42287e7b0829910e446..ba331923adb5518d74a72b9132de0860c0fa3fef 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp
@@ -144,94 +144,94 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(({:.10e}*{}{:+15.10e})^6)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\left({:.3e}{}{:+8.3e}\\right)^6\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 6.0 * std::pow(params[0] * vp + params[1], 5.0);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity in the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
+     *
+     * @param optimizer optimizer used to find the parameters
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset(int) Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(({:.10e}*{}{:+15.10e})^6)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\left({:.3e}{}{:+8.3e}\\right)^6\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
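+        // Math note: for f = (a*x + b)^6, df/db = 6*(a*x + b)^5; df/da is x times this value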
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 6.0 * std::pow(params[0] * vp + params[1], 5.0);});
+    }
     #endif
 };
 void generateSixPowNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
index ce23a64becfb6fd15d6fa52236b29b3f8427c7df..a33af35d9dba23fed60ea35f39ecf19ed50f3454 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.cpp
@@ -8,11 +8,20 @@ void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsign
     node_ptr new_feat = std::make_shared<SqParamNode>(feat, feat_ind, optimizer);
 
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -26,8 +35,16 @@ SqParamNode::SqParamNode(node_ptr feat, unsigned long int feat_ind, double l_bou
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 SqParamNode::SqParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -47,19 +64,35 @@ void SqParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void SqNode::set_value(const double* params, int offset, bool for_comp, int depth)
 {
     bool is_root = (offset == -1);
     offset += is_root;
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::sq(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -67,9 +100,23 @@ void SqNode::set_value(const double* params, int offset, bool for_comp, int dept
 void SqNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-
-    allowed_op_funcs::sq(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    allowed_op_funcs::sq(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void SqNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -78,7 +125,9 @@ void SqNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
index 46226875ee8b09cb2f0f70b1f32b1f7fecebf2d1..7a90945ec3bd451164b1d6dbe2e9d23dc4fd96f5 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp
@@ -143,8 +143,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -152,7 +151,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -174,13 +175,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
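+    // The parameters are serialized after a ':' (comma-separated) and the result is
+    // prepended, '|'-separated, ahead of the child feature's postfix expression.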
 };
 
-void generateSqParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateSqParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
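+// Appends the new feature only when the fit yields a nonzero scale parameter and the
+// resulting values are non-NaN, non-constant, and within [l_bound, u_bound]
+// (see parameterized_square.cpp).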
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp
index 5dd48a4576cc374bc3874db95cf3c0be1fb88a83..70e66bc0d97cc478ee7329ab45e52acf0a3f109b 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.cpp
@@ -4,13 +4,22 @@ void generateSqNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned lo
 {
     ++feat_ind;
     if((feat->type() == NODE_TYPE::SQRT) || (feat->type() == NODE_TYPE::INV))
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SqNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -26,11 +35,20 @@ SqNode::SqNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double
     OperatorNode({feat}, feat_ind)
 {
     if((feat->type() == NODE_TYPE::SQRT) || (feat->type() == NODE_TYPE::INV))
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
 }
 
@@ -38,9 +56,13 @@ void SqNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, i
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -70,5 +92,11 @@ void SqNode::set_value(int offset, bool for_comp)
 void SqNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::sq(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::sq(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
index cc9c8b8b45a99ecf88ca126e8c05a7a9177a8151..9ce24583fa153e9abf14b4bb6d4eb76fb5e4a8f0 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp
@@ -143,94 +143,94 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(({:.10e}*{}{:+15.10e})^2)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\left({:.3e}{}{:+8.3e}\\right)^2\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 2.0 * params[0] * vp + params[1];});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from the child features' value_ptr and the property to get the parameters for the feature
+     *
+     * @param optimizer optimizer used to fit the parameters to the property
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(({:.10e}*{}{:+15.10e})^2)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\left({:.3e}{}{:+8.3e}\\right)^2\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 2.0 * (params[0] * vp + params[1]);});
+    }
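+    // For f(v) = (params[0]*v + params[1])^2 the bias derivative is
+    // 2*(params[0]*v + params[1]), analogous to the 6*(...)^5 and 0.5*(...)^(-1/2)
+    // forms in SixPowNode and SqrtNode.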
     #endif
 };
 void generateSqNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
index 55f664ed7ad95295375a7a2266367a133c6452b0..d5d201d242f3999d8831f42db832af9c7326a6a4 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.cpp
@@ -8,12 +8,20 @@ void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsi
     node_ptr new_feat = std::make_shared<SqrtParamNode>(feat, feat_ind, optimizer);
 
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
     // No is_const check since the square root function can only be constant if feat is constant
-    if(new_feat->is_nan() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -29,8 +37,15 @@ SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, double l
     get_parameters(optimizer);
 
     // No is_const check since the square root function can only be constant if feat is constant
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 SqrtParamNode::SqrtParamNode(node_ptr feat, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -62,19 +77,35 @@ void SqrtParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
         _sign_alpha = 1.0;
     }
     else if(min_res_neg == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
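+    // _sign_alpha fixes the chosen sign of the scale parameter; SqrtParamNode::set_bounds
+    // (below) uses it as the upper bound on params[0].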
 }
 
 void SqrtNode::set_value(const double* params, int offset, bool for_comp, int depth)
 {
     bool is_root = (offset == -1);
     offset += is_root;
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
+
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::sqrt(_n_samp, vp_0, params[0], params[1], val_ptr);
 }
@@ -82,9 +113,23 @@ void SqrtNode::set_value(const double* params, int offset, bool for_comp, int de
 void SqrtNode::set_test_value(const double* params, int offset, bool for_comp, int depth)
 {
     offset += (offset == -1);
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
 
-    allowed_op_funcs::sqrt(_n_test_samp, _feats[0]->test_value_ptr(params + 2, 2 * offset), params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    allowed_op_funcs::sqrt(
+        _n_test_samp,
+        vp_0,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void SqrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -93,7 +138,9 @@ void SqrtNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[0] = 1.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
@@ -104,7 +151,9 @@ void SqrtParamNode::set_bounds(double* lb, double* ub, int from_parent, int dept
     ub[0] = _sign_alpha;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2, ub + 2, depth + 1);
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
index e0abd3fc72082ca918ccc6662eab62551cce4230..82199a46fad127712f58fd1c9dedd55d79eb7186 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp
@@ -145,8 +145,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -154,7 +153,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -184,13 +185,22 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[0]->update_postfix(cur_expr, false);
     }
 };
 
-void generateSqrtParamNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateSqrtParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp
index ce99961f792de0660df61de70f2b23e166cab666..c9d6e573ebe20f6570a7b3e37accbef6d7c639a4 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.cpp
@@ -3,14 +3,27 @@
 void generateSqrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound)
 {
     ++feat_ind;
-    if((feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::INV))
+    if(
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::SIX_POW) ||
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::INV)
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SqrtNode>(feat, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
-    if(std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -25,12 +38,27 @@ SqrtNode::SqrtNode(node_ptr feat, unsigned long int feat_ind):
 SqrtNode::SqrtNode(node_ptr feat, unsigned long int feat_ind, double l_bound, double u_bound):
     OperatorNode({feat}, feat_ind)
 {
-    if((feat->type() == NODE_TYPE::SQ) || (feat->type() == NODE_TYPE::CB) || (feat->type() == NODE_TYPE::SIX_POW) || (feat->type() == NODE_TYPE::CBRT) || (feat->type() == NODE_TYPE::INV))
+    if(
+        (feat->type() == NODE_TYPE::SQ) ||
+        (feat->type() == NODE_TYPE::CB) ||
+        (feat->type() == NODE_TYPE::SIX_POW) ||
+        (feat->type() == NODE_TYPE::CBRT) ||
+        (feat->type() == NODE_TYPE::INV)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 
 }
 
@@ -38,9 +66,13 @@ void SqrtNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves,
 {
     std::string key = expr();
     if(add_sub_leaves.count(key) > 0)
+    {
         add_sub_leaves[key] += pl_mn;
+    }
     else
+    {
         add_sub_leaves[key] = pl_mn;
+    }
 
     ++expected_abs_tot;
 }
@@ -70,5 +102,11 @@ void SqrtNode::set_value(int offset, bool for_comp)
 void SqrtNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::sqrt(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::sqrt(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
index 3fc8cf72c45a5b755c99184f3e30dff0d54a4913..439e63497bc1d09369457863fc2ee6db7f916fbe 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp
@@ -144,93 +144,93 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "(sqrt({:.10e}*{}{:+15.10e}))",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left(\\sqrt{{ {:.3e}{}{:+8.3e} }}\\right)",
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp)
-        {
-            double* val_ptr = _feats[0]->value_ptr(params);
-            std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 0.5 * std::pow(params[0] * vp + params[1], -0.5);});
-        }
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from the child features' value_ptr and the property to get the parameters for the feature
+     *
+     * @param optimizer optimizer used to fit the parameters to the property
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "(sqrt({:.10e}*{}{:+15.10e}))",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + 2, depth + 1) : _feats[0]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left(\\sqrt{{ {:.3e}{}{:+8.3e} }}\\right)",
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    virtual void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp)
+    {
+        double* val_ptr = _feats[0]->value_ptr(params);
+        std::transform(val_ptr, val_ptr + _n_samp, dfdp, [params](double vp){return 0.5 * std::pow(params[0] * vp + params[1], -0.5);});
+    }
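+    // d/d(params[1]) of sqrt(params[0]*v + params[1]); it diverges as the argument
+    // approaches zero, and non-positive arguments are already rejected via is_nan.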
     #endif
 };
 void generateSqrtNode(std::vector<node_ptr>& feat_list, node_ptr feat, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
index 07f0c8fefe43b13de55ee3a6847410b6127b875a..d0a588930ecf808544ba3414da91302a6b494795 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.cpp
@@ -8,11 +8,20 @@ void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
     node_ptr new_feat = std::make_shared<SubParamNode>(feat_1, feat_2, feat_ind, optimizer);
 
     if(std::abs(new_feat->parameters()[0]) <= 1e-10)
+    {
         return;
+    }
 
     new_feat->set_value();
-    if(new_feat->is_nan() || new_feat->is_const() || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) || (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_nan() ||
+        new_feat->is_const() ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) > u_bound) ||
+        (util_funcs::max_abs_val<double>(new_feat->value_ptr(), new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -20,14 +29,24 @@ void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, nod
 SubParamNode::SubParamNode()
 {}
 
-SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer) :
+SubParamNode::SubParamNode(
+    node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer
+) :
     SubNode(feat_1, feat_2, feat_ind)
 {
     _params.resize(n_params(), 0.0);
     get_parameters(optimizer);
 
-    if((std::abs(_params[0]) <= 1e-10) || is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        (std::abs(_params[0]) <= 1e-10) ||
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 SubParamNode::SubParamNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, std::shared_ptr<NLOptimizer> optimizer) :
@@ -47,7 +66,9 @@ void SubParamNode::get_parameters(std::shared_ptr<NLOptimizer> optimizer)
 {
     double min_res = optimizer->optimize_feature_params(this);
     if(min_res == std::numeric_limits<double>::infinity())
+    {
         _params[0] = 0.0;
+    }
 }
 
 void SubNode::set_value(const double* params, int offset, bool for_comp, int depth)
@@ -55,13 +76,35 @@ void SubNode::set_value(const double* params, int offset, bool for_comp, int dep
     bool is_root = (offset == -1);
     offset += is_root;
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->value_ptr(2 * offset + 1);
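+    // Parameter layout: params[0..1] are this node's scale and bias, the right child's
+    // parameters start at params + 2, and the left child's follow after the right
+    // child's _feats[1]->n_params() entries.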
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->value_ptr(params + 2, 2 * offset + 1, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->value_ptr(2 * offset + 1);
+    }
+
     double* val_ptr;
     if(_selected && is_root)
+    {
         val_ptr = node_value_arrs::get_d_matrix_ptr(_d_mat_ind);
+    }
     else
+    {
         val_ptr = node_value_arrs::get_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false);
+    }
 
     allowed_op_funcs::sub(_n_samp, vp_0, vp_1, params[0], params[1], val_ptr);
 }
@@ -70,10 +113,33 @@ void SubNode::set_test_value(const double* params, int offset, bool for_comp, in
 {
     offset += (offset == -1);
 
-    double* vp_0 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1) : _feats[0]->test_value_ptr(2 * offset);
-    double* vp_1 = (depth < nlopt_wrapper::MAX_PARAM_DEPTH) ? _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1) : _feats[1]->test_value_ptr(2 * offset + 1);
-
-    allowed_op_funcs::sub(_n_test_samp, vp_0, vp_1, params[0], params[1], node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false));
+    double* vp_0;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_0 = _feats[0]->test_value_ptr(params + _feats[1]->n_params() + 2, 2 * offset, for_comp, depth + 1);
+    }
+    else
+    {
+        vp_0 = _feats[0]->test_value_ptr(2 * offset);
+    }
+
+    double* vp_1;
+    if(depth < nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
+        vp_1 = _feats[1]->test_value_ptr(params + 2, 2 * offset + 1, depth + 1);
+    }
+    else
+    {
+        vp_1 = _feats[1]->test_value_ptr(2 * offset + 1);
+    }
+    allowed_op_funcs::sub(
+        _n_test_samp,
+        vp_0,
+        vp_1,
+        params[0],
+        params[1],
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp, false)
+    );
 }
 
 void SubNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
@@ -82,7 +148,9 @@ void SubNode::set_bounds(double* lb, double* ub, int from_parent, int depth)
     ub[1] = 0.0;
 
     if(depth >= nlopt_wrapper::MAX_PARAM_DEPTH)
+    {
         return;
+    }
 
     _feats[0]->set_bounds(lb + 2 + _feats[1]->n_params(), ub + 2 + _feats[1]->n_params(), 2 + _feats[1]->n_params(), depth + 1);
     _feats[1]->set_bounds(lb + 2, ub + 2, depth + 1);
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
index 163f37c2af9e0b5a35c95c79f227f16bfdd0d385..7588945449119618e5c76974ee6eb2ab4ac1d9fd 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp
@@ -145,8 +145,7 @@ public:
      *
      * @param prop property to fit to get the parameters
      */
-    void get_parameters(std::shared_ptr<NLOptimizer> optimizer)
-;
+    void get_parameters(std::shared_ptr<NLOptimizer> optimizer);
 
     /**
      * @brief Set the non-linear parameters
@@ -154,7 +153,9 @@ public:
     inline void set_parameters(std::vector<double> params, bool check_sz = true)
     {
         if(check_sz && (params.size() != n_params()))
+        {
             throw std::logic_error("Wrong number of parameters passed to set_parameters.");
+        }
         _params = params;
     }
 
@@ -176,7 +177,9 @@ public:
         {
             postfix << ": " << std::setprecision(13) << std::scientific << _params[0];
             for(int pp = 1; pp < _params.size(); ++pp)
+            {
                 postfix << "," << std::setprecision(13) << std::scientific << _params[pp];
+            }
         }
         cur_expr = postfix.str() + "|" + cur_expr;
         _feats[1]->update_postfix(cur_expr, false);
@@ -184,6 +187,14 @@ public:
     }
 };
 
-void generateSubParamNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound, std::shared_ptr<NLOptimizer> optimizer);
+void generateSubParamNode(
+    std::vector<node_ptr>& feat_list,
+    node_ptr feat_1,
+    node_ptr feat_2,
+    unsigned long int& feat_ind,
+    double l_bound,
+    double u_bound,
+    std::shared_ptr<NLOptimizer> optimizer
+);
 
 #endif
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp
index b0224bfe973e9c9698ec046d2b8e696f3a945981..c2d84db368e51ff4383aea3d12b46713a070d940 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.cpp
@@ -4,25 +4,43 @@ void generateSubNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr
 {
     ++feat_ind;
     if(feat_1->unit() != feat_2->unit())
+    {
         return;
+    }
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
     feat_1->update_add_sub_leaves(add_sub_leaves,  1, expected_abs_tot);
     feat_2->update_add_sub_leaves(add_sub_leaves, -1, expected_abs_tot);
 
-    if((add_sub_leaves.size() < 2) || (std::abs(std::accumulate(add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);})) != 0))
+    int leaves_v_expected = std::accumulate(
+        add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);}
+    );
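+    // The sum of |net coefficient| over the leaves equals expected_abs_tot only when no
+    // leaf enters with both signs; a nonzero difference means terms cancel and the
+    // subtraction reduces to a simpler feature, so it is skipped.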
+    if((add_sub_leaves.size() < 2) || (std::abs(leaves_v_expected) != 0))
+    {
         return;
+    }
 
     int add_sub_tot_first = std::abs(add_sub_leaves.begin()->second);
-    if((std::abs(add_sub_tot_first) > 1) && std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;}))
+    if(
+        (std::abs(add_sub_tot_first) > 1) &&
+        std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;})
+    )
+    {
         return;
+    }
 
     node_ptr new_feat = std::make_shared<SubNode>(feat_1, feat_2, feat_ind);
     double* val_ptr = new_feat->value_ptr();
 
-    if(new_feat->is_const() || std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) || (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound))
+    if(
+        new_feat->is_const() ||
+        std::any_of(val_ptr, val_ptr + new_feat->n_samp(), [&u_bound](double d){return !std::isfinite(d) || (std::abs(d) > u_bound);}) ||
+        (util_funcs::max_abs_val<double>(val_ptr, new_feat->n_samp()) < l_bound)
+    )
+    {
         return;
+    }
 
     feat_list.push_back(new_feat);
 }
@@ -38,26 +56,46 @@ SubNode::SubNode(node_ptr feat_1, node_ptr feat_2, unsigned long int feat_ind, d
     OperatorNode({feat_1, feat_2}, feat_ind)
 {
     if(_feats[0]->unit() != _feats[1]->unit())
+    {
         throw InvalidFeatureException();
+    }
 
     std::map<std::string, int> add_sub_leaves;
     int expected_abs_tot = 0;
     update_add_sub_leaves(add_sub_leaves, 1, expected_abs_tot);
 
     if((add_sub_leaves.size() < 2))
+    {
         throw InvalidFeatureException();
+    }
 
-    if(std::abs(std::accumulate(add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);})) != 0)
+    int leaves_v_expected = std::accumulate(
+        add_sub_leaves.begin(), add_sub_leaves.end(), -1*expected_abs_tot, [](int tot, auto el){return tot + std::abs(el.second);}
+    );
+    if(std::abs(leaves_v_expected) != 0)
+    {
         throw InvalidFeatureException();
+    }
 
     int add_sub_tot_first = std::abs(add_sub_leaves.begin()->second);
-
-    if((std::abs(add_sub_tot_first) > 1) && std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;}))
+    if(
+        (std::abs(add_sub_tot_first) > 1) &&
+        std::all_of(add_sub_leaves.begin(), add_sub_leaves.end(), [&add_sub_tot_first](auto el){return std::abs(el.second) == add_sub_tot_first;})
+    )
+    {
         throw InvalidFeatureException();
+    }
 
     set_value();
-    if(is_nan() || is_const() || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) || (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound))
+    if(
+        is_nan() ||
+        is_const() ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) > u_bound) ||
+        (util_funcs::max_abs_val<double>(value_ptr(), _n_samp) < l_bound)
+    )
+    {
         throw InvalidFeatureException();
+    }
 }
 
 void SubNode::update_add_sub_leaves(std::map<std::string, int>& add_sub_leaves, int pl_mn, int& expected_abs_tot)
@@ -70,9 +108,13 @@ void SubNode::update_div_mult_leaves(std::map<std::string, double>& div_mult_lea
 {
     std::string key = expr();
     if(div_mult_leaves.count(key) > 0)
+    {
         div_mult_leaves[key] += fact;
+    }
     else
+    {
         div_mult_leaves[key] = fact;
+    }
 
     expected_abs_tot += std::abs(fact);
 }
@@ -97,5 +139,12 @@ void SubNode::set_value(int offset, bool for_comp)
 void SubNode::set_test_value(int offset, bool for_comp)
 {
     offset += (offset == -1);
-    allowed_op_funcs::sub(_n_test_samp, _feats[0]->test_value_ptr(2 * offset, for_comp), _feats[1]->test_value_ptr(2 * offset + 1, for_comp), 1.0, 0.0, node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp));
+    allowed_op_funcs::sub(
+        _n_test_samp,
+        _feats[0]->test_value_ptr(2 * offset, for_comp),
+        _feats[1]->test_value_ptr(2 * offset + 1, for_comp),
+        1.0,
+        0.0,
+        node_value_arrs::get_test_value_ptr(_arr_ind, _feat_ind, rung(), offset, for_comp)
+    );
 }
diff --git a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
index af0525843fb1de21da647ba4434b09a1d1c1dd46..49c80aa09536bdb7a4272ea2d09a6e8e95299c29 100644
--- a/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp
@@ -154,92 +154,92 @@ public:
     void update_div_mult_leaves(std::map<std::string, double>& div_mult_leaves, double fact, double& expected_abs_tot);
 
     #ifdef PARAMETERIZE
-        /**
-         * @brief The parameters used for introducing more non linearity in the operators
-         */
-        virtual std::vector<double> parameters(){return {};}
-
-        /**
-         * @brief Solve the non-linear optimization to set the parameters
-         * @details Fits the data points from _feats->value_ptr and prop to get the parameters for the feature
-         *
-         * @param prop property to fit to get the parameters
-         */
-        virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
-
-        /**
-         * @brief Set the non-linear parameters
-        */
-        virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
-
-        /**
-         * @brief Set the values of the training data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief Set the values of the test data for the feature inside of the value storage arrays
-         *
-         * @param offset(int) Key to determine which part of the temporary storage array to look into
-         * @param params pointer to the parameter values
-         */
-        void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
-
-        /**
-         * @brief The expression of the feature
-         *
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return feature expression
-         */
-        inline std::string expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "({} - ({:.10e}*{}{:+15.10e}))",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Get the latexified expression for the overall feature (From root node down)
-         *
-         * @param cap string to place in the start/end of the string
-         * @param params parameter values for non-linear operations
-         * @param depth the current depth of the node on the Binary expression tree
-         * @return the latexified expression
-         */
-        inline std::string get_latex_expr(double* params, int depth=1)
-        {
-            return fmt::format(
-                "\\left({} - \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[0],
-                (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + 2, depth + 1) : _feats[0]->get_latex_expr()),
-                params[1]
-            );
-        }
-
-        /**
-         * @brief Set the bounds for the nl parameterization
-         *
-         * @param lb pointer to the lower bounds data
-         * @param ub pointer to the upper bounds data
-         */
-        void set_bounds(double* lb, double* ub, int from_parent=2, int depth = 1);
-
-        /**
-         * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
-         *
-         * @param params pointer to the parameters
-         * @param dfdp pointer to where the feature derivative pointers are located
-         */
-        inline void param_derivative(const double* params, double* dfdp){std::fill_n(dfdp, _n_samp, -1.0);}
+    /**
+     * @brief The parameters used for introducing more non-linearity into the operators
+     */
+    virtual std::vector<double> parameters(){return {};}
+
+    /**
+     * @brief Solve the non-linear optimization to set the parameters
+     * @details Fits the data points from the child features' value_ptr and the property to get the parameters for the feature
+     *
+     * @param optimizer optimizer used to fit the parameters to the property
+     */
+    virtual void get_parameters(std::shared_ptr<NLOptimizer> optimizer){return;}
+
+    /**
+     * @brief Set the non-linear parameters
+     */
+    virtual void set_parameters(std::vector<double>, bool check_sz=true){return;}
+
+    /**
+     * @brief Set the values of the training data for the feature inside of the value storage arrays
+     *
+     * @param offset Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief Set the values of the test data for the feature inside of the value storage arrays
+     *
+     * @param offset Key to determine which part of the temporary storage array to look into
+     * @param params pointer to the parameter values
+     */
+    void set_test_value(const double* params, int offset=-1, bool for_comp=false, int depth=1);
+
+    /**
+     * @brief The expression of the feature
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return feature expression
+     */
+    inline std::string expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "({} - ({:.10e}*{}{:+15.10e}))",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->expr(params + 2, depth + 1) : _feats[1]->expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Get the latexified expression for the overall feature (From root node down)
+     *
+     * @param params parameter values for non-linear operations
+     * @param depth the current depth of the node on the Binary expression tree
+     * @return the latexified expression
+     */
+    inline std::string get_latex_expr(double* params, int depth=1)
+    {
+        return fmt::format(
+            "\\left({} - \\left({:.3e}*{}{:+8.3e}\\right)\\right)",
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[0]->get_latex_expr(params + _feats[1]->n_params() + 2, depth + 1) : _feats[0]->get_latex_expr()),
+            params[0],
+            (depth < nlopt_wrapper::MAX_PARAM_DEPTH ? _feats[1]->get_latex_expr(params + 2, depth + 1) : _feats[1]->get_latex_expr()),
+            params[1]
+        );
+    }
+
+    /**
+     * @brief Set the bounds for the nl parameterization
+     *
+     * @param lb pointer to the lower bounds data
+     * @param ub pointer to the upper bounds data
+     */
+    void set_bounds(double* lb, double* ub, int from_parent=2, int depth=1);
+
+    /**
+     * @brief Calculates the derivative of an operation with respect to the parameters for a given sample
+     *
+     * @param params pointer to the parameters
+     * @param dfdp pointer to where the feature derivative pointers are located
+     */
+    inline void param_derivative(const double* params, double* dfdp){std::fill_n(dfdp, _n_samp, -1.0);}
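+    // With f = v0 - (params[0]*v1 + params[1]), the derivative with respect to the
+    // bias params[1] is identically -1 for every sample.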
     #endif
 };
 void generateSubNode(std::vector<node_ptr>& feat_list, node_ptr feat_1, node_ptr feat_2, unsigned long int& feat_ind, double l_bound, double u_bound);
diff --git a/src/feature_creation/node/operator_nodes/allowed_ops.hpp b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
index af66bd94789e4a2a87f0e7473b5fad41817ff938..7e932f631a2eb729f20b0a382b5d0248cc20fab3 100644
--- a/src/feature_creation/node/operator_nodes/allowed_ops.hpp
+++ b/src/feature_creation/node/operator_nodes/allowed_ops.hpp
@@ -8,41 +8,41 @@
 #define ALLOWED_OP_NODES
 
 #ifdef PARAMETERIZE
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/add/parameterized_add.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sub/parameterized_subtract.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs/parameterized_absolute_value.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/parameterized_absolute_difference.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/mult/parameterized_multiply.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/div/parameterized_divide.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/inv/parameterized_inverse.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/exp/parameterized_exponential.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/parameterized_negative_exponential.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/log/parameterized_log.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sq/parameterized_square.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/cb/parameterized_cube.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/parameterized_sixth_power.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/parameterized_square_root.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/parameterized_cube_root.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sin/parameterized_sin.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/cos/parameterized_cos.hpp>
 #else
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp>
-    #include <feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/add/add.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sub/subtract.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs/absolute_value.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/abs_diff/absolute_difference.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/mult/multiply.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/div/divide.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/inv/inverse.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/exp/exponential.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/neg_exp/negative_exponential.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/log/log.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sq/square.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/cb/cube.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/six_pow/sixth_power.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sqrt/square_root.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/cbrt/cube_root.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/sin/sin.hpp>
+#include <feature_creation/node/operator_nodes/allowed_operator_nodes/cos/cos.hpp>
 #endif
 
 #include <map>
diff --git a/src/feature_creation/node/utils.cpp b/src/feature_creation/node/utils.cpp
index 0763e1f7d2e58d789eed2a938970b52318b6f879..f357bbdf74c540e5cb5b57ca3afc0f4d38587b11 100644
--- a/src/feature_creation/node/utils.cpp
+++ b/src/feature_creation/node/utils.cpp
@@ -13,8 +13,12 @@ node_ptr str2node::postfix2node(std::string postfix_expr, const std::vector<node
 {
     std::vector<node_ptr> stack;
     std::vector<std::string> postfix_split = str_utils::split_string_trim(postfix_expr, "|");
+
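+    // A single token is not an operator sequence; it is simply an index into the primary feature set phi_0.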
     if(postfix_split.size() == 1)
+    {
         return phi_0[std::stoi(postfix_split[0])];
+    }
+
     for(int ff = 0; ff < postfix_split.size(); ++ff)
     {
         std::string term = postfix_split[ff];
@@ -29,288 +33,294 @@ node_ptr str2node::postfix2node(std::string postfix_expr, const std::vector<node
             if(op_terms[0] == "add")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 2] = std::make_shared<AddNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 2] = std::make_shared<AddNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 2] = std::make_shared<AddParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 2], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 2] = std::make_shared<AddNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 2] = std::make_shared<AddParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 2], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 2] = std::make_shared<AddNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
                 stack.pop_back();
             }
             else if(op_terms[0] == "sub")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 2] = std::make_shared<SubNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 2] = std::make_shared<SubNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 2] = std::make_shared<SubParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 2], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 2] = std::make_shared<SubNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 2] = std::make_shared<SubParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 2], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 2] = std::make_shared<SubNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
                 stack.pop_back();
             }
             else if(op_terms[0] == "abd")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 2] = std::make_shared<AbsDiffNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 2] = std::make_shared<AbsDiffNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 2] = std::make_shared<AbsDiffParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 2], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 2] = std::make_shared<AbsDiffNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 2] = std::make_shared<AbsDiffParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 2], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 2] = std::make_shared<AbsDiffNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
                 stack.pop_back();
             }
             else if(op_terms[0] == "mult")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 2] = std::make_shared<MultNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 2] = std::make_shared<MultNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 2] = std::make_shared<MultParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 2], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 2] = std::make_shared<MultNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 2] = std::make_shared<MultParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 2], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 2] = std::make_shared<MultNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
                 stack.pop_back();
             }
             else if(op_terms[0] == "div")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 2] = std::make_shared<DivNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 2] = std::make_shared<DivNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 2] = std::make_shared<DivParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 2], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 2] = std::make_shared<DivNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 2] = std::make_shared<DivParamNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 2], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 2] = std::make_shared<DivNode>(stack[stack.size() - 2], stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
                 stack.pop_back();
             }
             else if(op_terms[0] == "abs")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<AbsNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<AbsNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<AbsParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<AbsNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<AbsParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<AbsNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "inv")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<InvNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<InvNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<InvParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<InvNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<InvParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<InvNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "exp")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<ExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<ExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<ExpParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<ExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<ExpParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<ExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "nexp")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<NegExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<NegExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<NegExpParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<NegExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<NegExpParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<NegExpNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "log")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<LogNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<LogNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<LogParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<LogNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<LogParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<LogNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "sin")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<SinNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<SinNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SinParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SinNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<SinParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<SinNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "cos")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<CosNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<CosNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<CosParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<CosNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<CosParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<CosNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "sq")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<SqNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<SqNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SqParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SqNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<SqParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<SqNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "sqrt")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<SqrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<SqrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SqrtParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SqrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<SqrtParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<SqrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "cb")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<CbNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<CbNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<CbParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<CbNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<CbParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<CbNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "cbrt")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<CbrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<CbrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<CbrtParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<CbrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<CbrtParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<CbrtNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else if(op_terms[0] == "sp")
             {
                 #ifndef PARAMETERIZE
-                    stack[stack.size() - 1] = std::make_shared<SixPowNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                stack[stack.size() - 1] = std::make_shared<SixPowNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
                 #else
-                    if(op_terms.size() > 1)
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SixPowParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                        set_parameters(stack[stack.size() - 1], op_terms);
-                    }
-                    else
-                    {
-                        stack[stack.size() - 1] = std::make_shared<SixPowNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
-                    }
+                if(op_terms.size() > 1)
+                {
+                    stack[stack.size() - 1] = std::make_shared<SixPowParamNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                    set_parameters(stack[stack.size() - 1], op_terms);
+                }
+                else
+                {
+                    stack[stack.size() - 1] = std::make_shared<SixPowNode>(stack[stack.size() - 1], feat_ind, 1e-50, 1e50);
+                }
                 #endif
             }
             else
+            {
                 throw std::logic_error("Term in postfix expression does not represent a node");
+            }
             ++feat_ind;
         }
     }
     if(stack.size() != 1)
-        throw std::logic_error("Went through postfix expression and still more than one node in the list. This must be an invalid expression: " + postfix_expr + ".");
+    {
+        throw std::logic_error(
+            "Went through postfix expression and still more than one node in the list. This must be an invalid expression: " + postfix_expr + "."
+        );
+    }
     return stack[0];
 }
 
@@ -333,7 +343,9 @@ std::vector<node_ptr> str2node::phi_selected_from_file(std::string filename, std
     while(std::getline(file_stream, line))
     {
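+        // Lines starting with '#' are treated as comments in the selected-features file.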
         if(line[0] == '#')
+        {
             continue;
+        }
 
         node_value_arrs::resize_d_matrix_arr(1);
         boost::algorithm::split(split_line, line, boost::algorithm::is_any_of("\t "), boost::token_compress_on);
@@ -368,7 +380,9 @@ std::vector<node_ptr> str2node::phi_from_file(std::string filename, std::vector<
     {
         ++numLines;
         if(line[0] == '#')
+        {
             continue;
+        }
         try
         {
             node_ptr feat = postfix2node(line, phi_0, feat_ind);
@@ -385,49 +399,89 @@ std::vector<node_ptr> str2node::phi_from_file(std::string filename, std::vector<
     }
     file_stream.close();
     if(numLines < 1)
+    {
         throw std::logic_error("File, " + filename + " not present");
+    }
     return phi;
 }
 
 std::string node_identifier::feature_type_to_string(NODE_TYPE nt)
 {
     if(nt == NODE_TYPE::FEAT)
+    {
         return "feature";
+    }
     if(nt == NODE_TYPE::MODEL_FEATURE)
+    {
         return "model";
+    }
     if(nt == NODE_TYPE::ADD)
+    {
         return "add";
+    }
     if(nt == NODE_TYPE::SUB)
+    {
         return "sub";
+    }
     if(nt == NODE_TYPE::ABS_DIFF)
+    {
         return "abs_diff";
+    }
     if(nt == NODE_TYPE::MULT)
+    {
         return "mult";
+    }
     if(nt == NODE_TYPE::DIV)
+    {
         return "div";
+    }
     if(nt == NODE_TYPE::EXP)
+    {
         return "exp";
+    }
     if(nt == NODE_TYPE::NEG_EXP)
+    {
         return "neg_exp";
+    }
     if(nt == NODE_TYPE::INV)
+    {
         return "inv";
+    }
     if(nt == NODE_TYPE::SQ)
+    {
         return "sq";
+    }
     if(nt == NODE_TYPE::CB)
+    {
         return "cb";
+    }
     if(nt == NODE_TYPE::SIX_POW)
+    {
         return "six_pow";
+    }
     if(nt == NODE_TYPE::SQRT)
+    {
         return "sqrt";
+    }
     if(nt == NODE_TYPE::CBRT)
+    {
         return "cbrt";
+    }
     if(nt == NODE_TYPE::LOG)
+    {
         return "log";
+    }
     if(nt == NODE_TYPE::ABS)
+    {
         return "abs";
+    }
     if(nt == NODE_TYPE::SIN)
+    {
         return "sin";
+    }
     if(nt == NODE_TYPE::COS)
+    {
         return "cos";
+    }
     throw std::logic_error("Invalid feature type");
 }
diff --git a/src/feature_creation/node/utils.hpp b/src/feature_creation/node/utils.hpp
index b06aff48d27f8e43c93efdd65c577870e6e311c4..d08d85f985ac1b9f155caf59d44d75fb3b4af01e 100644
--- a/src/feature_creation/node/utils.hpp
+++ b/src/feature_creation/node/utils.hpp
@@ -17,57 +17,57 @@
 
 namespace str2node
 {
-    /**
-     * @brief Convert a postfix expression into a node_ptr
-     * @details Creates a stack to iteratively generate the feature represented by the expression
-     *
-     * @param postfix_expr The postfix expression of the feature node
-     * @param phi_0 The initial feature set
-     * @param feat_ind The desired feature index
-     * @return The feature node described by the postfix expression
-     */
-    node_ptr postfix2node(std::string postfix_expr, const std::vector<node_ptr>& phi_0, unsigned long int& feat_ind);
+/**
+ * @brief Convert a postfix expression into a node_ptr
+ * @details Creates a stack to iteratively generate the feature represented by the expression
+ *
+ * @param postfix_expr The postfix expression of the feature node
+ * @param phi_0 The initial feature set
+ * @param feat_ind The desired feature index
+ * @return The feature node described by the postfix expression
+ */
+node_ptr postfix2node(std::string postfix_expr, const std::vector<node_ptr>& phi_0, unsigned long int& feat_ind);
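+// Example (assumed token format): "0|1|add" pushes phi_0[0] and phi_0[1] onto the stack, then combines them with an AddNode.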
 
-    /**
-     * @brief Convert a feature_space/selected_features.txt into a phi_selected;
-     * @details Read in the file to get the postfix expressions and regenerate the selected features using phi_0
-     *
-     * @param filename The name of the feature_space/selected_features.txt file
-     * @param phi_0 The initial feature space
-     *
-     * @return The selected feature set from the file
-     */
-    std::vector<node_ptr> phi_selected_from_file(std::string filename, std::vector<node_ptr> phi_0);
+/**
+ * @brief Convert a feature_space/selected_features.txt file into phi_selected
+ * @details Read in the file to get the postfix expressions and regenerate the selected features using phi_0
+ *
+ * @param filename The name of the feature_space/selected_features.txt file
+ * @param phi_0 The initial feature space
+ *
+ * @return The selected feature set from the file
+ */
+std::vector<node_ptr> phi_selected_from_file(std::string filename, std::vector<node_ptr> phi_0);
 
-    /**
-     * @brief Convert a text file into the feature space
-     * @details Read in the file to get the postfix expressions and regenerate the features using phi_0
-     *
-     * @param filename The name of the file storing all the features
-     * @param phi_0 The initial feature space
-     *
-     * @return The feature set defined from the file
-     */
-    std::vector<node_ptr> phi_from_file(std::string filename, std::vector<node_ptr> phi_0);
+/**
+ * @brief Convert a text file into the feature space
+ * @details Read in the file to get the postfix expressions and regenerate the features using phi_0
+ *
+ * @param filename The name of the file storing all the features
+ * @param phi_0 The initial feature space
+ *
+ * @return The feature set defined from the file
+ */
+std::vector<node_ptr> phi_from_file(std::string filename, std::vector<node_ptr> phi_0);
 
-    #ifdef PARAMETERIZE
-        /**
-         * @brief set the parameters of a node from the postfix expression
-         *
-         * @param feat Features whose parameters need to be set
-         * @param op_terms term used to define the object
-         */
-        void set_parameters(node_ptr feat, std::vector<std::string> op_terms);
-    #endif
+#ifdef PARAMETERIZE
+/**
+ * @brief Set the parameters of a node from the postfix expression
+ *
+ * @param feat Feature whose parameters need to be set
+ * @param op_terms terms used to define the object
+ */
+void set_parameters(node_ptr feat, std::vector<std::string> op_terms);
+#endif
 }
 namespace node_identifier
 {
-    /**
-     * @brief Convert a node type into the corresponding string identifier
-     *
-     * @param nt node type
-     * @return string representation of the node type
-     */
-    std::string feature_type_to_string(NODE_TYPE nt);
+/**
+ * @brief Convert a node type into the corresponding string identifier
+ *
+ * @param nt node type
+ * @return string representation of the node type
+ */
+std::string feature_type_to_string(NODE_TYPE nt);
 }
 #endif
diff --git a/src/feature_creation/node/value_storage/nodes_value_containers.cpp b/src/feature_creation/node/value_storage/nodes_value_containers.cpp
index faaba003b3810b2083addda1c3b35a9912a61b08..6adcfec0792ba2ce21990f4d98db2749029243f7 100644
--- a/src/feature_creation/node/value_storage/nodes_value_containers.cpp
+++ b/src/feature_creation/node/value_storage/nodes_value_containers.cpp
@@ -25,16 +25,24 @@ std::vector<double> node_value_arrs::TEMP_STORAGE_TEST_ARR;
 void node_value_arrs::initialize_values_arr(int n_samples, int n_samples_test, int n_primary_feat, int max_rung, bool set_task_sz)
 {
     if(max_rung < 0)
+    {
         throw std::logic_error("Maximum rung of the features is less than 0");
+    }
 
     if(max_rung == 0)
+    {
         std::cerr << "Warning requested calculation has a maximum rung of 0" << std::endl;
+    }
 
     if(set_task_sz)
+    {
         TASK_SZ_TRAIN = {n_samples};
+    }
 
     if(set_task_sz)
+    {
         TASK_SZ_TEST = {n_samples_test};
+    }
 
     N_SAMPLES = n_samples;
     N_SAMPLES_TEST = n_samples_test;
@@ -70,26 +78,34 @@ void node_value_arrs::initialize_values_arr(std::vector<int> task_sz_train, std:
 void node_value_arrs::set_task_sz_train(std::vector<int> task_sz_train)
 {
     if(std::accumulate(task_sz_train.begin(), task_sz_train.end(), 0) != N_SAMPLES)
+    {
         throw  std::logic_error("The total number of samples has changed, task_sz_train is wrong.");
+    }
     TASK_SZ_TRAIN = task_sz_train;
 }
 
 void node_value_arrs::set_task_sz_test(std::vector<int> task_sz_test)
 {
     if(std::accumulate(task_sz_test.begin(), task_sz_test.end(), 0) != N_SAMPLES_TEST)
+    {
         throw  std::logic_error("The total number of test samples has changed, task_sz_test is wrong.");
+    }
     TASK_SZ_TEST = task_sz_test;
 }
 
 void node_value_arrs::resize_values_arr(int n_dims, int n_feat, bool use_temp)
 {
     if(n_dims > MAX_RUNG)
+    {
         throw std::logic_error("Requested rung is larger MAX_RUNG.");
+    }
 
     N_RUNGS_STORED = n_dims;
     N_STORE_FEATURES = n_feat;
     if(N_STORE_FEATURES == 0)
+    {
         N_STORE_FEATURES = 1;
+    }
 
     VALUES_ARR.resize(N_STORE_FEATURES * N_SAMPLES);
     VALUES_ARR.shrink_to_fit();
@@ -128,13 +144,17 @@ double* node_value_arrs::get_value_ptr(unsigned long int arr_ind, unsigned long
     if(modify_reg && (rung <= N_RUNGS_STORED))
     {
         if(arr_ind > N_STORE_FEATURES)
+        {
             throw std::logic_error("Requested arr_ind (" + std::to_string(arr_ind) + ") is too high (max " + std::to_string(N_STORE_FEATURES) +")");
+        }
         return  access_value_arr(arr_ind);
     }
 
     int op_slot = get_op_slot(rung, offset, for_comp);
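+    // The register entry records feat_ind when the slot is claimed (modify_reg is true) and -1 otherwise.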
     temp_storage_reg(arr_ind, op_slot) = feat_ind * modify_reg - (!modify_reg);
-    return access_temp_storage((arr_ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1));
+    return access_temp_storage(
+        (arr_ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
+    );
 }
 
 double* node_value_arrs::get_test_value_ptr(unsigned long int arr_ind, unsigned long int feat_ind, int rung, int offset, bool for_comp, bool modify_reg)
@@ -142,13 +162,17 @@ double* node_value_arrs::get_test_value_ptr(unsigned long int arr_ind, unsigned
     if(modify_reg && (rung <= N_RUNGS_STORED))
     {
         if(arr_ind > N_STORE_FEATURES)
+        {
             throw std::logic_error("Requested arr_ind (" + std::to_string(arr_ind) + ") is too high (max " + std::to_string(N_STORE_FEATURES) +")");
+        }
         return  access_test_value_arr(arr_ind);
     }
 
     int op_slot = get_op_slot(rung, offset, for_comp);
     temp_storage_test_reg(arr_ind, op_slot) = feat_ind * modify_reg - (!modify_reg);
-    return access_temp_storage_test((arr_ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1));
+    return access_temp_storage_test(
+        (arr_ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
+    );
 }
 
 void node_value_arrs::initialize_d_matrix_arr()
diff --git a/src/feature_creation/node/value_storage/nodes_value_containers.hpp b/src/feature_creation/node/value_storage/nodes_value_containers.hpp
index 84c7d8a6a8fb057df2650f5f7565cab5111fecf8..e88ddde5282dc82c945d662fbfcff0f6665878f5 100644
--- a/src/feature_creation/node/value_storage/nodes_value_containers.hpp
+++ b/src/feature_creation/node/value_storage/nodes_value_containers.hpp
@@ -127,7 +127,10 @@ namespace node_value_arrs
      *
      * @return The operator slot to use
      */
-    inline int get_op_slot(int rung, int offset, bool for_comp){return std::abs(N_OP_SLOTS / (1 + !for_comp) - static_cast<int>(std::pow(2, MAX_RUNG - rung)) - offset);}
+    inline int get_op_slot(int rung, int offset, bool for_comp)
+    {
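+        // N_OP_SLOTS / (1 + !for_comp) is the full slot range when for_comp is set and half of it otherwise;
+        // the 2^(MAX_RUNG - rung) term then spaces out the slots used by different rungs.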
+        return std::abs(N_OP_SLOTS / (1 + !for_comp) - static_cast<int>(std::pow(2, MAX_RUNG - rung)) - offset);
+    }
 
     /**
      * @brief Get a reference slot/feature register of the training data
@@ -137,7 +140,12 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and op_slot
      */
-    inline int& temp_storage_reg(unsigned long int ind, int op_slot=0){return TEMP_STORAGE_REG[(ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)];}
+    inline int& temp_storage_reg(unsigned long int ind, int op_slot=0)
+    {
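+        // Each OpenMP thread owns a contiguous block of N_STORE_FEATURES * N_OP_SLOTS + 1 register entries;
+        // within a block, entries are indexed by feature (fast axis) and operator slot (slow axis).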
+        return TEMP_STORAGE_REG[
+            (ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
+        ];
+    }
 
     /**
      * @brief Get a reference slot/feature register of the test data
@@ -147,7 +155,12 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and op_slot
      */
-    inline int& temp_storage_test_reg(unsigned long int ind, int op_slot=0){return TEMP_STORAGE_TEST_REG[(ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)];}
+    inline int& temp_storage_test_reg(unsigned long int ind, int op_slot=0)
+    {
+        return TEMP_STORAGE_TEST_REG[
+            (ind % N_STORE_FEATURES) + (op_slot % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
+        ];
+    }
 
     /**
      * @brief Get a reference slot/feature register of the training data
@@ -158,7 +171,14 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and offset
      */
-    inline int& temp_storage_reg(unsigned long int ind, int rung, int offset, bool for_comp){return TEMP_STORAGE_REG[(ind % N_STORE_FEATURES) + (get_op_slot(rung, offset, for_comp) % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)];}
+    inline int& temp_storage_reg(unsigned long int ind, int rung, int offset, bool for_comp)
+    {
+        return TEMP_STORAGE_REG[
+            (ind % N_STORE_FEATURES) +
+            (get_op_slot(rung, offset, for_comp) % N_OP_SLOTS) * N_STORE_FEATURES +
+            omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
+        ];
+    }
 
     /**
      * @brief Get a reference slot/feature register of the test data
@@ -169,7 +189,14 @@ namespace node_value_arrs
      *
      * @return The register element for a given feature index and offset
      */
-    inline int& temp_storage_test_reg(unsigned long int ind, int rung, int offset, bool for_comp){return TEMP_STORAGE_TEST_REG[(ind % N_STORE_FEATURES) + (get_op_slot(rung, offset, for_comp) % N_OP_SLOTS) * N_STORE_FEATURES + omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)];}
+    inline int& temp_storage_test_reg(unsigned long int ind, int rung, int offset, bool for_comp)
+    {
+        return TEMP_STORAGE_TEST_REG[
+            (ind % N_STORE_FEATURES) +
+            (get_op_slot(rung, offset, for_comp) % N_OP_SLOTS) * N_STORE_FEATURES +
+            omp_get_thread_num() * (N_STORE_FEATURES * N_OP_SLOTS + 1)
+        ];
+    }
 
     /**
      * @brief Access element of the permanent training data storage array
@@ -247,7 +274,10 @@ namespace node_value_arrs
      * @brief Flush the temporary storage register (training data)
      * @details Reset all slots in the register to -1
      */
-    inline void clear_temp_reg_thread(){std::fill_n(TEMP_STORAGE_REG.begin() + (N_STORE_FEATURES * N_OP_SLOTS + 1) * omp_get_thread_num(), N_STORE_FEATURES * N_OP_SLOTS + 1, -1);}
+    inline void clear_temp_reg_thread()
+    {
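+        // Only the calling thread's block of the register is reset; other threads' entries are left untouched.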
+        std::fill_n(TEMP_STORAGE_REG.begin() + (N_STORE_FEATURES * N_OP_SLOTS + 1) * omp_get_thread_num(), N_STORE_FEATURES * N_OP_SLOTS + 1, -1);
+    }
 
     /**
      * @brief Flush the temporary storage register (test data)