Rename all macros and definitions prefixed by 'PROXNLP_*' #39

Merged (2 commits) on Dec 18, 2023
Changes from all commits
16 changes: 8 additions & 8 deletions CMakeLists.txt
@@ -85,12 +85,12 @@ endif(INITIALIZE_WITH_NAN)

if(CHECK_RUNTIME_MALLOC)
message(STATUS "Check if some memory allocations are performed at runtime.")
-add_compile_definitions(PROXNLP_EIGEN_CHECK_MALLOC)
+add_compile_definitions(PROXSUITE_NLP_EIGEN_CHECK_MALLOC)
add_definitions(-DEIGEN_RUNTIME_NO_MALLOC)
endif(CHECK_RUNTIME_MALLOC)

if(ENABLE_TEMPLATE_INSTANTIATION)
-add_definitions(-DPROXNLP_ENABLE_TEMPLATE_INSTANTIATION)
+add_definitions(-DPROXSUITE_NLP_ENABLE_TEMPLATE_INSTANTIATION)
endif(ENABLE_TEMPLATE_INSTANTIATION)

macro(TAG_LIBRARY_VERSION target)
@@ -105,15 +105,15 @@ add_project_dependency(fmt "9.1.0...<11" REQUIRED PKG_CONFIG_REQUIRES "fmt >= 9.

if(BUILD_WITH_PROXSUITE)
add_project_dependency(proxsuite REQUIRED)
-add_definitions(-DPROXNLP_ENABLE_PROXSUITE_LDLT)
+add_definitions(-DPROXSUITE_NLP_USE_PROXSUITE_LDLT)
endif()

# Variable containing all the cflags definition relative to optional dependencies
set(CFLAGS_DEPENDENCIES)

if(BUILD_WITH_OPENMP_SUPPORT)
find_package(OpenMP REQUIRED)
-add_compile_definitions(PROXNLP_MULTITHREADING)
+add_compile_definitions(PROXSUITE_NLP_MULTITHREADING)
endif(BUILD_WITH_OPENMP_SUPPORT)

set(BOOST_REQUIRED_COMPONENTS filesystem serialization system)
@@ -137,7 +137,7 @@ endif(BUILD_PYTHON_INTERFACE)
if(BUILD_WITH_PINOCCHIO_SUPPORT)
message(STATUS "Building with Pinocchio support.")
add_project_dependency(pinocchio REQUIRED PKG_CONFIG_REQUIRES "pinocchio >= 2.9.1")
-add_compile_definitions(PROXNLP_WITH_PINOCCHIO)
+add_compile_definitions(PROXSUITE_NLP_WITH_PINOCCHIO)
endif(BUILD_WITH_PINOCCHIO_SUPPORT)

# --- MAIN LIBRARY ----------------------------------------
@@ -278,13 +278,13 @@ endmacro(
var_value)

if(BUILD_WITH_PINOCCHIO_SUPPORT)
-export_variable(PROXNLP_WITH_PINOCCHIO_SUPPORT ON)
+export_variable(PROXSUITE_NLP_WITH_PINOCCHIO_SUPPORT ON)
endif()
if(BUILD_WITH_CASADI_SUPPORT)
-export_variable(PROXNLP_USE_CASADI ON)
+export_variable(PROXSUITE_NLP_USE_CASADI ON)
endif()
if(BUILD_PYTHON_INTERFACE)
-export_variable(PROXNLP_WITH_PYTHON_INTERFACE ON)
+export_variable(PROXSUITE_NLP_WITH_PYTHON_INTERFACE ON)
endif()

pkg_config_append_libs(${PROJECT_NAME})
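Note on usage: the compile definitions touched in this file change name only, not meaning. A minimal sketch of how C++ code compiled under the same options would test the renamed guards (the guarded bodies below are placeholders, not code from this PR):

// consumer.cpp -- illustrative only; everything except the macro names is assumed.
#ifdef PROXSUITE_NLP_WITH_PINOCCHIO // was PROXNLP_WITH_PINOCCHIO
#include <pinocchio/multibody/model.hpp>
#endif
#ifdef PROXSUITE_NLP_MULTITHREADING // was PROXNLP_MULTITHREADING
#include <omp.h>
#endif

int main() {
#ifdef PROXSUITE_NLP_MULTITHREADING
  // OpenMP path only exists when the library was built with BUILD_WITH_OPENMP_SUPPORT.
  omp_set_num_threads(2);
#endif
  return 0;
}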
2 changes: 1 addition & 1 deletion doc/Doxyfile.extra.in
@@ -20,7 +20,7 @@ ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
PREDEFINED += EIGEN_MAKE_ALIGNED_OPERATOR_NEW \
-PROXNLP_DYNAMIC_TYPEDEFS
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS

FULL_PATH_NAMES = YES
EXCLUDE_SYMBOLS = *::internal, internal::*, *::internal::*
2 changes: 1 addition & 1 deletion examples/l1_qp.py
@@ -1,5 +1,5 @@
"""
-Solve an L1-penalized QP using PROXNLP.
+Solve an L1-penalized QP using proxsuite_nlp.
"""
import proxsuite_nlp
import numpy as np
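For context, the generic L1-penalized QP has the form
\min_x \; \tfrac{1}{2} x^\top Q x + q^\top x + \lambda \lVert x \rVert_1
with Q positive semidefinite; the particular Q, q, lambda and constraints used in this script are not visible in this hunk.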
4 changes: 2 additions & 2 deletions examples/solo_inverse_geom.py
@@ -1,6 +1,6 @@
"""
Inverse kinematics with friction cone constraint.
-We compare PROXNLP with the solver IPOPT.
+We compare proxsuite_nlp with the solver IPOPT.

min Dq, f || q - q0 ||**2 + || f ||**2

@@ -213,7 +213,7 @@ def compute_cost(q_ref, fs):
viz.display(qopt_ipopt)


-# Problem PROXNLP
+# Problem proxsuite_nlp

xspace = MultibodyPhaseSpace(model)
pb_space = VectorSpace(4 * 3 + (xspace.ndx))
2 changes: 1 addition & 1 deletion examples/ur5-ik.cpp
@@ -15,7 +15,7 @@
#include <boost/filesystem/path.hpp>

using Scalar = double;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

using namespace proxnlp;
namespace pin = pinocchio;
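As this example shows, the renamed typedef macro is invoked exactly as before: at namespace or class scope with a scalar type, bringing in the dynamic-size Eigen aliases (VectorXs, ConstVectorRef, MatrixXs, ...) seen throughout this diff. A minimal sketch, assuming the macro is provided by proxnlp/math.hpp (the header name is not spelled out in this hunk):

// Illustrative consumer snippet; the include path is an assumption.
#include "proxnlp/math.hpp" // assumed to define PROXSUITE_NLP_DYNAMIC_TYPEDEFS

using Scalar = double;
PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar); // was PROXNLP_DYNAMIC_TYPEDEFS(Scalar)

// VectorXs is one of the aliases the macro introduces.
VectorXs make_zero_vector(int n) { return VectorXs::Zero(n); }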
4 changes: 2 additions & 2 deletions include/proxnlp/constraint-base.hpp
@@ -16,7 +16,7 @@ namespace proxnlp {
template <typename _Scalar> struct ConstraintSetBase {
public:
using Scalar = _Scalar;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using ActiveType = Eigen::Matrix<bool, Eigen::Dynamic, 1>;
using Self = ConstraintSetBase<Scalar>;

@@ -117,7 +117,7 @@ template <typename _Scalar> struct ConstraintSetBase {
*/
template <typename _Scalar> struct ConstraintObjectTpl {
using Scalar = _Scalar;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

using FunctionType = C2FunctionTpl<Scalar>;
using ConstraintSet = ConstraintSetBase<Scalar>;
2 changes: 1 addition & 1 deletion include/proxnlp/context.hpp
@@ -7,7 +7,7 @@ namespace context {

using Scalar = double;

-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using VectorXBool = Eigen::Matrix<bool, Eigen::Dynamic, 1>;

using BCLParams = ::proxnlp::BCLParamsTpl<Scalar>;
8 changes: 4 additions & 4 deletions include/proxnlp/cost-function.hpp
@@ -14,7 +14,7 @@ template <typename Scalar>
auto downcast_function_to_cost(const shared_ptr<C2FunctionTpl<Scalar>> &func)
-> shared_ptr<CostFunctionBaseTpl<Scalar>> {
if (func->nr() != 1) {
-PROXNLP_RUNTIME_ERROR(
+PROXSUITE_NLP_RUNTIME_ERROR(
"Function cannot be cast to cost (codimension nr != 1).");
}
return std::make_shared<func_to_cost<Scalar>>(func);
@@ -29,7 +29,7 @@ template <typename _Scalar>
struct CostFunctionBaseTpl : public C2FunctionTpl<_Scalar> {
public:
using Scalar = _Scalar;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using Base = C2FunctionTpl<Scalar>;

CostFunctionBaseTpl(const int nx, const int ndx) : Base(nx, ndx, 1) {}
@@ -90,7 +90,7 @@ struct CostFunctionBaseTpl : public C2FunctionTpl<_Scalar> {

template <typename _Scalar> struct func_to_cost : CostFunctionBaseTpl<_Scalar> {
using Scalar = _Scalar;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using Base = CostFunctionBaseTpl<Scalar>;
using C2Function = C2FunctionTpl<Scalar>;

@@ -118,6 +118,6 @@ template <typename _Scalar> struct func_to_cost : CostFunctionBaseTpl<_Scalar> {

} // namespace proxnlp

-#ifdef PROXNLP_ENABLE_TEMPLATE_INSTANTIATION
+#ifdef PROXSUITE_NLP_ENABLE_TEMPLATE_INSTANTIATION
#include "proxnlp/cost-function.txx"
#endif
4 changes: 2 additions & 2 deletions include/proxnlp/cost-sum.hpp
@@ -11,7 +11,7 @@ namespace proxnlp {
template <typename _Scalar> struct CostSumTpl : CostFunctionBaseTpl<_Scalar> {
public:
using Scalar = _Scalar;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using Base = CostFunctionBaseTpl<Scalar>;
using BasePtr = shared_ptr<Base>;

@@ -114,6 +114,6 @@ template <typename _Scalar> struct CostSumTpl : CostFunctionBaseTpl<_Scalar> {

#include "proxnlp/cost-sum.hxx"

-#ifdef PROXNLP_ENABLE_TEMPLATE_INSTANTIATION
+#ifdef PROXSUITE_NLP_ENABLE_TEMPLATE_INSTANTIATION
#include "proxnlp/cost-sum.txx"
#endif
15 changes: 8 additions & 7 deletions include/proxnlp/exceptions.hpp
@@ -5,22 +5,23 @@
#include <stdexcept>
#include <fmt/core.h>

-#define PROXNLP_RUNTIME_ERROR(msg) \
+#define PROXSUITE_NLP_RUNTIME_ERROR(msg) \
throw ::proxnlp::RuntimeError( \
fmt::format("{}({}): {}", __FILE__, __LINE__, msg))

-#define PROXNLP_DIM_CHECK(x, nx) \
+#define PROXSUITE_NLP_DIM_CHECK(x, nx) \
if (x.size() != nx) \
-PROXNLP_RUNTIME_ERROR(fmt::format( \
+PROXSUITE_NLP_RUNTIME_ERROR(fmt::format( \
"Input size invalid (expected {:d}, got {:d})", nx, x.size()))

-#define PROXNLP_RAISE_IF_NAN(value) \
+#define PROXSUITE_NLP_RAISE_IF_NAN(value) \
if (::proxnlp::math::check_value(value)) \
-PROXNLP_RUNTIME_ERROR("Encountered NaN.\n")
+PROXSUITE_NLP_RUNTIME_ERROR("Encountered NaN.\n")

-#define PROXNLP_RAISE_IF_NAN_NAME(value, name) \
+#define PROXSUITE_NLP_RAISE_IF_NAN_NAME(value, name) \
if (::proxnlp::math::check_value(value)) \
-PROXNLP_RUNTIME_ERROR(fmt::format("Encountered NaN for value {:s}.\n", name))
+PROXSUITE_NLP_RUNTIME_ERROR( \
+fmt::format("Encountered NaN for value {:s}.\n", name))

namespace proxnlp {

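Call sites of these error macros only change prefix; the signatures are unchanged. A small usage sketch, under the assumption that proxnlp/exceptions.hpp declares proxnlp::RuntimeError and that proxnlp/math.hpp provides proxnlp::math::check_value (the helper function and its arguments are made up for illustration):

#include <Eigen/Core>
#include "proxnlp/exceptions.hpp" // assumed to declare proxnlp::RuntimeError
#include "proxnlp/math.hpp"       // assumed to provide proxnlp::math::check_value

// Hypothetical helper: rejects inputs of the wrong size or containing NaNs.
inline void validate_input(const Eigen::VectorXd &x, Eigen::Index expected_dim) {
  PROXSUITE_NLP_DIM_CHECK(x, expected_dim); // was PROXNLP_DIM_CHECK
  PROXSUITE_NLP_RAISE_IF_NAN_NAME(x, "x");  // was PROXNLP_RAISE_IF_NAN_NAME
  if (expected_dim == 0)
    PROXSUITE_NLP_RUNTIME_ERROR("expected_dim must be positive.");
}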
8 changes: 4 additions & 4 deletions include/proxnlp/function-base.hpp
@@ -17,7 +17,7 @@ template <typename _Scalar> struct BaseFunctionTpl : math_types<_Scalar> {

public:
using Scalar = _Scalar;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

BaseFunctionTpl(const int nx, const int ndx, const int nr)
: nx_(nx), ndx_(ndx), nr_(nr) {}
@@ -46,7 +46,7 @@ struct C1FunctionTpl : public BaseFunctionTpl<_Scalar> {
using Scalar = _Scalar;
using Base = BaseFunctionTpl<_Scalar>;
using Base::Base;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

/// @brief Jacobian matrix of the constraint function.
virtual void computeJacobian(const ConstVectorRef &x,
@@ -72,7 +72,7 @@ struct C2FunctionTpl : public C1FunctionTpl<_Scalar> {
using Scalar = _Scalar;
using Base = C1FunctionTpl<_Scalar>;
using Base::Base;
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

/// @brief Vector-hessian product.
virtual void vectorHessianProduct(const ConstVectorRef &,
@@ -84,6 +84,6 @@ struct C2FunctionTpl : public C1FunctionTpl<_Scalar> {

} // namespace proxnlp

-#ifdef PROXNLP_ENABLE_TEMPLATE_INSTANTIATION
+#ifdef PROXSUITE_NLP_ENABLE_TEMPLATE_INSTANTIATION
#include "proxnlp/function-base.txx"
#endif
6 changes: 3 additions & 3 deletions include/proxnlp/function-ops.hpp
@@ -17,14 +17,14 @@ template <typename _Scalar> struct ComposeFunctionTpl : C2FunctionTpl<_Scalar> {
using Base::computeJacobian;
using Base::vectorHessianProduct;

-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

ComposeFunctionTpl(const shared_ptr<Base> &left,
const shared_ptr<Base> &right)
: Base(right->nx(), right->ndx(), left->nr()), left_(left),
right_(right) {
if (left->nx() != right->nr()) {
-PROXNLP_RUNTIME_ERROR(fmt::format(
+PROXSUITE_NLP_RUNTIME_ERROR(fmt::format(
"Incompatible dimensions ({:d} and {:d}).", left->nx(), right->nr()));
}
assert(left->nx() == right->nr());
@@ -59,6 +59,6 @@ auto compose(const shared_ptr<C2FunctionTpl<Scalar>> &left,

} // namespace proxnlp

-#ifdef PROXNLP_ENABLE_TEMPLATE_INSTANTIATION
+#ifdef PROXSUITE_NLP_ENABLE_TEMPLATE_INSTANTIATION
#include "proxnlp/function-ops.txx"
#endif
2 changes: 1 addition & 1 deletion include/proxnlp/helpers/history-callback.hpp
@@ -17,7 +17,7 @@ template <typename Scalar> struct history_callback : base_callback<Scalar> {
: store_primal_dual_vars_(store_pd_vars), store_values_(store_values),
store_residuals_(store_residuals) {}

-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);

struct {
std::vector<VectorXs> xs;
10 changes: 5 additions & 5 deletions include/proxnlp/ldlt-allocator.hpp
@@ -6,7 +6,7 @@

#include "proxnlp/linalg/block-ldlt.hpp"
#include "proxnlp/linalg/bunchkaufman.hpp"
-#ifdef PROXNLP_ENABLE_PROXSUITE_LDLT
+#ifdef PROXSUITE_NLP_USE_PROXSUITE_LDLT
#include "proxnlp/linalg/proxsuite-ldlt-wrap.hpp"
#endif
#include <boost/variant.hpp>
@@ -36,7 +36,7 @@ template <typename Scalar,
using LDLTVariant =
boost::variant<linalg::DenseLDLT<Scalar>, linalg::BlockLDLT<Scalar>,
Eigen::LDLT<MatrixType>, Eigen::BunchKaufman<MatrixType>
-#ifdef PROXNLP_ENABLE_PROXSUITE_LDLT
+#ifdef PROXSUITE_NLP_USE_PROXSUITE_LDLT
,
linalg::ProxSuiteLDLTWrapper<Scalar>
#endif
@@ -119,10 +119,10 @@ LDLTVariant<Scalar> allocate_ldlt_from_sizes(const std::vector<isize> &nprims,
case LDLTChoice::EIGEN:
return Eigen::LDLT<MatrixXs>(size);
case LDLTChoice::PROXSUITE:
-#ifdef PROXNLP_ENABLE_PROXSUITE_LDLT
+#ifdef PROXSUITE_NLP_USE_PROXSUITE_LDLT
return linalg::ProxSuiteLDLTWrapper<Scalar>(size, size);
#else
-PROXNLP_RUNTIME_ERROR(
+PROXSUITE_NLP_RUNTIME_ERROR(
"ProxSuite support is not enabled. You should recompile ProxNLP with "
"the BUILD_WITH_PROXSUITE flag.");
#endif
@@ -210,7 +210,7 @@ computeInertiaTuple(const Eigen::Ref<Eigen::VectorXi const> &signature) {
nn++;
break;
default:
-PROXNLP_RUNTIME_ERROR(
+PROXSUITE_NLP_RUNTIME_ERROR(
"Signature vector should only contain values O, 1, -1.");
break;
}
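Note that the ProxSuite LDLT guard changes more than its prefix: PROXNLP_ENABLE_PROXSUITE_LDLT becomes PROXSUITE_NLP_USE_PROXSUITE_LDLT, matching the new CMake definition under BUILD_WITH_PROXSUITE. A sketch of a feature test against the renamed guard; everything except the macro name is illustrative:

// Standalone illustration, not the library's code.
#ifdef PROXSUITE_NLP_USE_PROXSUITE_LDLT   // was PROXNLP_ENABLE_PROXSUITE_LDLT
constexpr bool kHasProxSuiteLDLT = true;  // the ProxSuite-backed LDLT is compiled in
#else
constexpr bool kHasProxSuiteLDLT = false; // only the built-in LDLT backends are available
#endif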
6 changes: 3 additions & 3 deletions include/proxnlp/linalg/block-ldlt.hpp
@@ -19,7 +19,7 @@ namespace backend {

/// Implementation struct for the recursive block LDLT algorithm.
template <typename Scalar> struct block_impl {
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
MatrixRef mat;
SymbolicBlockMatrix sym_structure;
/// @returns bool whether the decomposition was successful.
@@ -194,7 +194,7 @@ template <typename Scalar> struct block_impl {
/// of structure should lead to recalculating the expected sparsity pattern of
/// the factorization, and even recomputing the sparsity-optimal permutation.
template <typename Scalar> struct BlockLDLT : ldlt_base<Scalar> {
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using Base = ldlt_base<Scalar>;
using DView = typename Base::DView;
using Traits = LDLT_Traits<MatrixXs, Eigen::Lower>;
@@ -340,6 +340,6 @@ template <typename Scalar> struct BlockLDLT : ldlt_base<Scalar> {

#include "./block-ldlt.hxx"

-#ifdef PROXNLP_ENABLE_TEMPLATE_INSTANTIATION
+#ifdef PROXSUITE_NLP_ENABLE_TEMPLATE_INSTANTIATION
#include "./block-ldlt.txx"
#endif
4 changes: 2 additions & 2 deletions include/proxnlp/linalg/block-ldlt.hxx
@@ -73,7 +73,7 @@ template <typename Derived>
bool BlockLDLT<Scalar>::solveInPlace(Eigen::MatrixBase<Derived> &b) const {

b.noalias() = permutationP().transpose() * b;
-PROXNLP_NOMALLOC_BEGIN;
+PROXSUITE_NLP_NOMALLOC_BEGIN;
BlockTriL mat_blk_L(m_matrix, m_structure);
bool flag = mat_blk_L.solveInPlace(b);

@@ -91,7 +91,7 @@
BlockTriU mat_blk_U(m_matrix.transpose(), m_struct_tr);
flag |= mat_blk_U.solveInPlace(b);

-PROXNLP_NOMALLOC_END;
+PROXSUITE_NLP_NOMALLOC_END;
b.noalias() = permutationP() * b;
return flag;
}
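The NOMALLOC markers follow the same rename. Tying this back to the CMakeLists.txt hunk: CHECK_RUNTIME_MALLOC defines both EIGEN_RUNTIME_NO_MALLOC and PROXSUITE_NLP_EIGEN_CHECK_MALLOC, so the BEGIN/END guards plausibly toggle Eigen's runtime allocation check around the solve. A standalone sketch of that idea under those assumptions, not the macro definitions from this PR:

// Assumed behaviour only: this presumes PROXSUITE_NLP_EIGEN_CHECK_MALLOC and
// EIGEN_RUNTIME_NO_MALLOC are defined together (as the CMake hunk above does),
// so Eigen exposes set_is_malloc_allowed(). The MY_ names are hypothetical.
#include <Eigen/Core>

#ifdef PROXSUITE_NLP_EIGEN_CHECK_MALLOC
#define MY_NOMALLOC_BEGIN ::Eigen::internal::set_is_malloc_allowed(false)
#define MY_NOMALLOC_END ::Eigen::internal::set_is_malloc_allowed(true)
#else
#define MY_NOMALLOC_BEGIN
#define MY_NOMALLOC_END
#endif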
2 changes: 1 addition & 1 deletion include/proxnlp/linalg/dense.hpp
@@ -161,7 +161,7 @@ dense_ldlt_reconstruct(typename math_types<Scalar>::ConstMatrixRef const &mat,

/// @brief A fast, recursive divide-and-conquer LDLT algorithm.
template <typename Scalar> struct DenseLDLT : ldlt_base<Scalar> {
-PROXNLP_DYNAMIC_TYPEDEFS(Scalar);
+PROXSUITE_NLP_DYNAMIC_TYPEDEFS(Scalar);
using Base = ldlt_base<Scalar>;
using DView = typename Base::DView;
using MatrixType = MatrixXs;