C++ API Reference#
-
namespace alpaqa#
Typedefs
-
using real_t = double#
Default floating point type.
-
using realmat = Eigen::Matrix<real_t, Eigen::Dynamic, Eigen::Dynamic>#
Default type for floating point matrices.
-
using dim = std::pair<casadi_int, casadi_int>#
Enums
-
enum class LBFGSStepSize#
Which method to use to select the L-BFGS step size.
Values:
-
enumerator BasedOnGradientStepSize#
-
enumerator BasedOnCurvature#
-
enum class PANOCStopCrit#
Values:
-
enumerator ApproxKKT#
Find an ε-approximate KKT point in the ∞-norm:
\[ \varepsilon = \left\| \gamma_k^{-1} (x^k - \hat x^k) + \nabla \psi(\hat x^k) - \nabla \psi(x^k) \right\|_\infty \]
-
enumerator ApproxKKT2#
Find an ε-approximate KKT point in the 2-norm:
\[ \varepsilon = \left\| \gamma_k^{-1} (x^k - \hat x^k) + \nabla \psi(\hat x^k) - \nabla \psi(x^k) \right\|_2 \]
-
enumerator ProjGradNorm#
∞-norm of the projected gradient with step size γ:
\[ \varepsilon = \left\| x^k - \Pi_C\left(x^k - \gamma_k \nabla \psi(x^k)\right) \right\|_\infty \]
-
enumerator ProjGradNorm2#
2-norm of the projected gradient with step size γ:
\[ \varepsilon = \left\| x^k - \Pi_C\left(x^k - \gamma_k \nabla \psi(x^k)\right) \right\|_2 \]
-
enumerator ProjGradUnitNorm#
∞-norm of the projected gradient with unit step size:
\[ \varepsilon = \left\| x^k - \Pi_C\left(x^k - \nabla \psi(x^k)\right) \right\|_\infty \]
-
enumerator ProjGradUnitNorm2#
2-norm of the projected gradient with unit step size:
\[ \varepsilon = \left\| x^k - \Pi_C\left(x^k - \nabla \psi(x^k)\right) \right\|_2 \]
-
enumerator FPRNorm#
∞-norm of fixed point residual:
\[ \varepsilon = \gamma_k^{-1} \left\| x^k - \Pi_C\left(x^k - \gamma_k \nabla \psi(x^k)\right) \right\|_\infty \]
-
enumerator FPRNorm2#
2-norm of fixed point residual:
\[ \varepsilon = \gamma_k^{-1} \left\| x^k - \Pi_C\left(x^k - \gamma_k \nabla \psi(x^k)\right) \right\|_2 \]
-
enumerator Ipopt#
The stopping criterion used by Ipopt, see https://link.springer.com/article/10.1007/s10107-004-0559-y equation (5).
Given a candidate iterate \( \hat x^k \) and the corresponding candidate Lagrange multipliers \( \hat y^k \) for the general constraints \( g(x)\in D \), the multipliers \( w \) for the box constraints \( x\in C \) (that are otherwise not computed explicitly) are given by
\[ w^k = v^k - \Pi_C(v^k), \]\[\begin{split} \begin{aligned} v^k &\triangleq \hat x^k - \nabla f(\hat x^k) - \nabla g(\hat x^k)\, \hat y^k \\ &= \hat x^k - \nabla \psi(\hat x^k) \end{aligned} \end{split}\]\[\begin{split} \begin{aligned} \varepsilon' &= \left\| \nabla f(\hat x^k) + \nabla g(\hat x^k)\, \hat y^k + w^k \right\|_\infty \\ &= \left\| \hat x^k - \Pi_C\left(v^k\right) \right\|_\infty \end{aligned} \end{split}\]\[ s_d \triangleq \max\left\{ s_\text{max},\;\frac{\|\hat y^k\|_1 + \|w^k\|_1}{2m + 2n} \right\} / s_\text{max}, \] and the reported stopping criterion value is then \( \varepsilon = \varepsilon' / s_d \).
-
enum class SolverStatus#
Exit status of a numerical solver such as ALM or PANOC.
Values:
-
enumerator Unknown#
Initial value.
-
enumerator Converged#
Converged and reached given tolerance.
-
enumerator MaxTime#
Maximum allowed execution time exceeded.
-
enumerator MaxIter#
Maximum number of iterations exceeded.
-
enumerator NotFinite#
Intermediate results were infinite or not-a-number.
-
enumerator NoProgress#
No progress was made in the last iteration.
-
enumerator Interrupted#
Solver was interrupted by the user.
Functions
-
inline const char *enum_name(PANOCStopCrit s)#
-
inline std::ostream &operator<<(std::ostream &os, PANOCStopCrit s)#
-
inline InnerStatsAccumulator<PANOCStats> &operator+=(InnerStatsAccumulator<PANOCStats> &acc, const PANOCStats &s)#
-
inline InnerStatsAccumulator<SecondOrderPANOCSolver::Stats> &operator+=(InnerStatsAccumulator<SecondOrderPANOCSolver::Stats> &acc, const SecondOrderPANOCSolver::Stats &s)#
-
inline InnerStatsAccumulator<StructuredPANOCLBFGSStats> &operator+=(InnerStatsAccumulator<StructuredPANOCLBFGSStats> &acc, const StructuredPANOCLBFGSStats &s)#
-
inline void minimize_update_anderson(LimitedMemoryQR &qr, rmat G, crvec rₖ, crvec rₖ₋₁, crvec gₖ, rvec γ_LS, rvec xₖ_aa)#
Solve one step of Anderson acceleration to find a fixed point of a function g(x):
\( g(x^\star) - x^\star = 0 \)
Updates the QR factorization of \( \mathcal{R}_k = QR \), solves the least squares problem to find \( \gamma_\text{LS} \), computes the next iterate \( x_{k+1} \), and stores the current function value \( g_k \) in the matrix \( G \), which is used as a circular buffer.
\[\begin{split} \begin{aligned} \mathcal{R}_k &= \begin{pmatrix} \Delta r_{k-m_k} & \dots & \Delta r_{k-1} \end{pmatrix} \in \mathbb{R}^{n\times m_k} \\ \Delta r_i &= r_{i+1} - r_i \\ r_i &= g_i - x_i \\ g_i &= g(x_i) \\ \DeclareMathOperator*{\argmin}{argmin} \gamma_\text{LS} &= \argmin_\gamma \left\| \mathcal{R}_k \gamma - r_k \right\|_2 \\ \alpha_i &= \begin{cases} \gamma_\text{LS}[0] & i = 0 \\ \gamma_\text{LS}[i] - \gamma_\text{LS}[i-1] & 0 < i < m_k \\ 1 - \gamma_\text{LS}[m_k - 1] & i = m_k \end{cases} \\ x_{k+1} &= \sum_{i=0}^{m_k} \alpha_i\,g_i \end{aligned} \end{split}\]
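For illustration, a minimal standalone sketch (not the library routine) of the final mixing step above: given the least-squares solution γ_LS and the function values g₀ … g_{m_k} stored as ordered columns of a matrix G (the library itself keeps G as a circular buffer), the next iterate is formed as x_{k+1} = Σᵢ αᵢ gᵢ.
#include <Eigen/Dense>
#include <cassert>

// Compute x_{k+1} = sum_i alpha_i g_i from gamma_LS (length m_k) and
// G = [g_0 ... g_{m_k}] (n x (m_k + 1)), following the formulas above.
Eigen::VectorXd anderson_mix(const Eigen::MatrixXd &G, const Eigen::VectorXd &gamma_LS) {
    const Eigen::Index m_k = gamma_LS.size();
    assert(m_k >= 1 && G.cols() == m_k + 1);
    Eigen::VectorXd alpha(m_k + 1);
    alpha(0) = gamma_LS(0);                       // alpha_0 = gamma_0
    for (Eigen::Index i = 1; i < m_k; ++i)
        alpha(i) = gamma_LS(i) - gamma_LS(i - 1); // alpha_i = gamma_i - gamma_{i-1}
    alpha(m_k) = 1 - gamma_LS(m_k - 1);           // alpha_{m_k} = 1 - gamma_{m_k-1}
    return G * alpha;                             // x_{k+1}
}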
-
inline InnerStatsAccumulator<GAAPGASolver::Stats> &operator+=(InnerStatsAccumulator<GAAPGASolver::Stats> &acc, const GAAPGASolver::Stats &s)#
-
inline InnerStatsAccumulator<LBFGSBStats> &operator+=(InnerStatsAccumulator<LBFGSBStats> &acc, const LBFGSBStats &s)#
-
inline InnerStatsAccumulator<PGASolver::Stats> &operator+=(InnerStatsAccumulator<PGASolver::Stats> &acc, const PGASolver::Stats &s)#
-
alpaqa::Problem load_CasADi_problem(const std::string &filename, unsigned n = 0, unsigned m = 0, bool second_order = false)#
Load a problem generated by CasADi (without parameters).
The file should contain functions with the names f, grad_f, g and grad_g. These functions evaluate the objective function, its gradient, the constraints, and the constraint gradient times a vector, respectively. If second_order is true, additional functions hess_L and hess_L_prod should be provided to evaluate the Hessian of the Lagrangian and Hessian-vector products.
If any of the dimensions are zero, they are determined from the g function in the given file.
- Parameters
filename – Filename of the shared library to load the functions from.
n – Number of decision variables ( \( x \in \mathbb{R}^n \)).
m – Number of general constraints ( \( g(x) \in \mathbb{R}^m \)).
second_order – Load the additional functions required for second-order PANOC.
- Throws
std::invalid_argument – The dimensions of the loaded functions do not match.
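A hedged usage sketch follows; the header path, the alpaqa::vec typedef and the Problem::f member are assumptions based on the surrounding documentation and are not part of this extract.
#include <alpaqa/interop/casadi/CasADiLoader.hpp>  // assumed location of load_CasADi_problem
#include <iostream>

int main() {
    // n and m are deduced from the "g" function because they are left at zero
    alpaqa::Problem p = alpaqa::load_CasADi_problem("./problem.so");
    std::cout << "n = " << p.n << ", m = " << p.m << '\n';
    alpaqa::vec x = alpaqa::vec::Zero(p.n);        // assumed Eigen vector typedef
    std::cout << "f(0) = " << p.f(x) << '\n';      // assumed std::function<f_sig> member
}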
-
ProblemWithParam load_CasADi_problem_with_param(const std::string &filename, unsigned n = 0, unsigned m = 0, unsigned p = 0, bool second_order = false)#
Load a problem generated by CasADi (with parameters).
The file should contain functions with the names f, grad_f, g and grad_g. These functions evaluate the objective function, its gradient, the constraints, and the constraint gradient times a vector, respectively. If second_order is true, additional functions hess_L and hess_L_prod should be provided to evaluate the Hessian of the Lagrangian and Hessian-vector products.
If any of the dimensions are zero, they are determined from the g function in the given file.
- Parameters
filename – Filename of the shared library to load the functions from.
n – Number of decision variables ( \( x \in \mathbb{R}^n \)).
m – Number of general constraints ( \( g(x) \in \mathbb{R}^m \)).
p – The number of parameters of the problem (second argument to all CasADi functions).
second_order – Load the additional functions required for second-order PANOC.
- Throws
std::invalid_argument – The dimensions of the loaded functions do not match.
-
template<class ObjFunT, class ObjGradFunT, class DirectionT>
inline PANOCStats panoc_impl(ObjFunT &ψ, ObjGradFunT &grad_ψ, const Box &C, rvec x, real_t ε, const PANOCParams &params, vec_allocator &alloc, DirectionT &direction_provider)#
-
template<class DirectionProviderT = LBFGS, class ObjFunT, class ObjGradFunT>
PANOCStats panoc(ObjFunT &ψ, ObjGradFunT &grad_ψ, const Box &C, rvec x, real_t ε, const PANOCParams &params, PANOCDirection<DirectionProviderT> direction, vec_allocator &alloc)#
-
template<class DirectionProviderT = LBFGS, class ObjFunT, class ObjGradFunT>
PANOCStats panoc(ObjFunT &ψ, ObjGradFunT &grad_ψ, const Box &C, rvec x, real_t ε, const PANOCParams &params, PANOCDirection<DirectionProviderT> direction)#
-
template<class Vec>
inline auto project(const Vec &v, const Box &box)# Project a vector onto a box.
\[ \Pi_C(v) \]- Parameters
v – [in] The vector to project
box – [in] The box to project onto
-
template<class Vec>
inline auto projecting_difference(const Vec &v, const Box &box)# Get the difference between the given vector and its projection.
\[ v - \Pi_C(v) \]Warning
Beware catastrophic cancellation!
- Parameters
v – [in] The vector to project
box – [in] The box to project onto
-
inline real_t dist_squared(crvec v, const Box &box)#
Get the distance squared between the given vector and its projection.
\[ \left\| v - \Pi_C(v) \right\|_2^2 \]Warning
Beware catastrophic cancellation!
- Parameters
v – [in] The vector to project
box – [in] The box to project onto
-
inline real_t dist_squared(crvec v, const Box &box, crvec Σ)#
Get the distance squared between the given vector and its projection in the Σ norm.
\[ \left\| v - \Pi_C(v) \right\|_\Sigma^2 = \left(v - \Pi_C(v)\right)^\top \Sigma \left(v - \Pi_C(v)\right) \]Warning
Beware catastrophic cancellation!
- Parameters
v – [in] The vector to project
box – [in] The box to project onto
Σ – [in] Diagonal matrix defining norm
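The helpers above (project, projecting_difference, dist_squared) all rely on the componentwise box projection; below is a minimal Eigen illustration, a standalone sketch that takes the bounds of C as plain vectors since the members of Box are not listed in this extract.
#include <Eigen/Dense>

// Componentwise projection onto the box [lb, ub]: Pi_C(v)_i = min(max(v_i, lb_i), ub_i).
Eigen::VectorXd project_box(const Eigen::VectorXd &v,
                            const Eigen::VectorXd &lb,
                            const Eigen::VectorXd &ub) {
    return v.cwiseMax(lb).cwiseMin(ub);
}

// v - Pi_C(v): zero for components strictly inside the box, hence the
// catastrophic-cancellation warning when v lies very close to the boundary.
Eigen::VectorXd projecting_difference_box(const Eigen::VectorXd &v,
                                          const Eigen::VectorXd &lb,
                                          const Eigen::VectorXd &ub) {
    return v - project_box(v, lb, ub);
}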
-
inline EvalCounter &operator+=(EvalCounter &a, const EvalCounter &b)#
-
inline EvalCounter::EvalTimer &operator+=(EvalCounter::EvalTimer &a, const EvalCounter::EvalTimer &b)#
-
inline EvalCounter operator+(EvalCounter a, const EvalCounter &b)#
-
template<class T, class ...Args>
auto wrapped_load(const std::string &so_name, const char *name, Args&&... args)#
-
const char *enum_name(SolverStatus s)#
-
std::ostream &operator<<(std::ostream &os, SolverStatus s)#
Variables
-
constexpr size_t panoc_min_alloc_size = 10#
-
static constexpr auto dims = [](auto... a) { return std::array<casadi_int, sizeof...(a)>{a...}; }#
-
struct ALMParams#
- #include <alpaqa/decl/alm.hpp>
Parameters for the Augmented Lagrangian solver.
Public Members
-
real_t Δ_lower = 0.8#
Factor to reduce ALMParams::Δ when inner convergence fails.
-
real_t Σ₀ = 1#
Initial penalty parameter.
When set to zero, it is computed automatically, based on the constraint violation in the starting point and the parameter ALMParams::σ₀.
-
real_t σ₀ = 20#
Initial penalty parameter factor.
Active if ALMParams::Σ₀ is set to zero.
-
real_t Σ₀_lower = 0.6#
Factor to reduce the initial penalty factor by if convergence fails in the first iteration.
-
real_t ε₀_increase = 1.1#
Factor to increase the initial primal tolerance if convergence fails in the first iteration.
-
real_t ρ_increase = 2#
Factor to increase the primal tolerance update factor by if convergence fails.
-
unsigned int max_iter = 100#
Maximum number of outer ALM iterations.
-
std::chrono::microseconds max_time = std::chrono::minutes(5)#
Maximum duration.
-
unsigned max_num_initial_retries = 20#
How many times the initial penalty ALMParams::Σ₀ or ALMParams::σ₀ can be reduced and the initial primal tolerance ALMParams::ε₀ increased.
-
unsigned max_num_retries = 20#
How many times the penalty update factor ALMParams::Δ can be reduced and the primal tolerance update factor ALMParams::ρ increased.
-
unsigned max_total_num_retries = 40#
Combined limit for ALMParams::max_num_initial_retries and ALMParams::max_num_retries.
-
unsigned print_interval = 0#
When to print progress.
If set to zero, nothing will be printed. If set to N != 0, progress is printed every N iterations.
-
bool preconditioning = false#
Apply preconditioning to the problem, based on the gradients in the starting point.
-
bool single_penalty_factor = false#
Use one penalty factor for all m constraints.
-
template<class InnerSolverT = PANOCSolver<>>
class ALMSolver# - #include <alpaqa/decl/alm.hpp>
Augmented Lagrangian Method solver.
Public Functions
-
inline ALMSolver(Params params, InnerSolver &&inner_solver)#
-
inline ALMSolver(Params params, const InnerSolver &inner_solver)#
-
inline std::string get_name() const#
-
inline void stop()#
Abort the computation and return the result so far.
Can be called from other threads or signal handlers.
Public Members
-
InnerSolver inner_solver#
-
struct Stats#
- #include <alpaqa/decl/alm.hpp>
Public Members
-
unsigned outer_iterations = 0#
Total number of outer ALM iterations (i.e.
the number of times that the inner solver was invoked).
-
std::chrono::microseconds elapsed_time#
Total elapsed time.
-
unsigned initial_penalty_reduced = 0#
The number of times that the initial penalty factor was reduced by ALMParams::Σ₀_lower and that the initial tolerance was increased by ALMParams::ε₀_increase because the inner solver failed to converge in the first ALM iteration.
If this number is greater than zero, consider lowering the initial penalty factor ALMParams::Σ₀ or ALMParams::σ₀ or increasing the initial tolerance ALMParams::ε₀ (or both).
-
unsigned penalty_reduced = 0#
The number of times that the penalty update factor ALMParams::Δ was reduced, that the tolerance update factor ALMParams::ρ was increased, and that an ALM iteration had to be restarted with a lower penalty factor and a higher tolerance because the inner solver failed to converge (not applicable in the first ALM iteration).
If this number is greater than zero, consider lowering the penalty update factor ALMParams::Δ or increasing the tolerance update factor (or both). Lowering the initial penalty factor could help as well.
-
unsigned inner_convergence_failures = 0#
The total number of times that the inner solver failed to converge.
-
real_t ε = inf#
Final primal tolerance that was reached, depends on the stopping criterion used by the inner solver, see for example PANOCStopCrit.
-
real_t δ = inf#
Final dual tolerance or constraint violation that was reached:
\[ \delta = \| \Pi_D\left(g(x^k) + \Sigma^{-1}y\right) \|_\infty \]
-
SolverStatus status = SolverStatus::Unknown#
Whether the solver converged or not.
See also
-
InnerStatsAccumulator<typename InnerSolver::Stats> inner#
The statistics of the inner solver invocations, accumulated over all ALM iterations.
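Typical usage combines ALMParams with an inner solver; the sketch below uses the StructuredPANOCLBFGSSolver constructor documented further down. The ALMSolver call operator and its argument order (problem, y, x), as well as ALMSolver::Params being ALMParams, are assumptions, since they are not shown in this extract.
#include <alpaqa/decl/alm.hpp>
#include <alpaqa/inner/decl/structured-panoc-lbfgs.hpp>
#include <utility>

void solve_example(const alpaqa::Problem &problem, alpaqa::vec &x, alpaqa::vec &y) {
    alpaqa::ALMParams almparams;
    almparams.max_iter       = 200;   // outer ALM iterations
    almparams.print_interval = 10;    // print progress every 10 outer iterations

    alpaqa::StructuredPANOCLBFGSParams panocparams;
    panocparams.max_iter  = 500;
    panocparams.stop_crit = alpaqa::PANOCStopCrit::ApproxKKT;

    alpaqa::LBFGSParams lbfgsparams;
    lbfgsparams.memory = 20;          // L-BFGS history length

    alpaqa::StructuredPANOCLBFGSSolver inner{panocparams, lbfgsparams};
    alpaqa::ALMSolver<alpaqa::StructuredPANOCLBFGSSolver> solver{almparams, std::move(inner)};

    auto stats = solver(problem, y, x);  // assumed call signature (not in this extract)
    (void)stats;                         // e.g. stats.status, stats.ε, stats.δ
}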
-
class AndersonAccel#
- #include <alpaqa/inner/directions/anderson-acceleration.hpp>
Anderson Acceleration.
-
class AtomicStopSignal#
- #include <alpaqa/util/atomic_stop_signal.hpp>
Public Functions
-
AtomicStopSignal() = default#
-
inline AtomicStopSignal(const AtomicStopSignal&)#
-
AtomicStopSignal &operator=(const AtomicStopSignal&) = delete#
-
inline AtomicStopSignal(AtomicStopSignal&&)#
-
inline AtomicStopSignal &operator=(AtomicStopSignal&&)#
-
inline void stop()#
-
inline bool stop_requested() const#
-
struct Box#
- #include <alpaqa/util/box.hpp>
-
class CasADiParamWrapper : public ParamWrapper, public std::enable_shared_from_this<CasADiParamWrapper>#
Public Functions
-
inline virtual std::shared_ptr<ParamWrapper> clone() const override#
Public Static Functions
-
static inline std::shared_ptr<CasADiParamWrapper> create(unsigned p, Functions &&functions)#
-
struct Functions#
-
template<class IndexT = size_t>
struct CircularIndexIterator# - #include <alpaqa/util/ringbuffer.hpp>
Public Types
-
using Indices = CircularIndices<Index>#
-
using reference = value_type#
-
using difference_type = std::ptrdiff_t#
-
using pointer = void#
-
using iterator_category = std::input_iterator_tag#
Public Functions
-
inline CircularIndexIterator()#
-
inline CircularIndexIterator &operator++()#
-
inline CircularIndexIterator &operator--()#
-
inline CircularIndexIterator operator++(int)#
-
inline CircularIndexIterator operator--(int)#
-
template<class IndexT>
bool operator==(CircularIndexIterator<IndexT> a, CircularIndexIterator<IndexT> b)# Note
Only valid for two indices in the same range.
-
template<class IndexT>
bool operator!=(CircularIndexIterator<IndexT> a, CircularIndexIterator<IndexT> b)# Note
Only valid for two indices in the same range.
-
template<class IndexT = size_t>
struct CircularIndices# - #include <alpaqa/util/ringbuffer.hpp>
-
template<class IndexT>
bool operator==(CircularIndices<IndexT> a, CircularIndices<IndexT> b)# Note
Only valid for two indices in the same range.
-
template<class IndexT>
bool operator!=(CircularIndices<IndexT> a, CircularIndices<IndexT> b)# Note
Only valid for two indices in the same range.
-
template<class IndexT>
class CircularRange# - #include <alpaqa/util/ringbuffer.hpp>
Public Types
-
using Indices = CircularIndices<Index>#
-
using const_iterator = CircularIndexIterator<Index>#
-
using iterator = const_iterator#
-
using const_reverse_iterator = ReverseCircularIndexIterator<Index>#
-
using reverse_iterator = const_reverse_iterator#
Public Functions
-
inline const_iterator cbegin() const#
-
inline const_iterator cend() const#
-
inline reverse_iterator rbegin() const#
-
inline reverse_iterator rend() const#
-
inline const_reverse_iterator crbegin() const#
-
inline const_reverse_iterator crend() const#
-
struct EvalCounter#
- #include <alpaqa/util/problem.hpp>
Public Functions
-
inline void reset()#
Public Members
-
unsigned f = {}#
-
unsigned grad_f = {}#
-
unsigned g = {}#
-
unsigned grad_g_prod = {}#
-
unsigned grad_gi = {}#
-
unsigned hess_L_prod = {}#
-
unsigned hess_L = {}#
-
struct alpaqa::EvalCounter::EvalTimer time#
-
struct EvalTimer#
- #include <alpaqa/util/problem.hpp>
-
struct GAAPGAParams#
- #include <alpaqa/inner/guarded-aa-pga.hpp>
Public Members
-
LipschitzEstimateParams Lipschitz#
Parameters related to the Lipschitz constant estimate and step size.
-
unsigned limitedqr_mem = 10#
Length of the history to keep in the limited-memory QR algorithm.
-
unsigned max_iter = 100#
Maximum number of inner iterations.
-
std::chrono::microseconds max_time = std::chrono::minutes(5)#
Maximum duration.
-
PANOCStopCrit stop_crit = PANOCStopCrit::ApproxKKT#
What stopping criterion to use.
-
unsigned print_interval = 0#
When to print progress.
If set to zero, nothing will be printed. If set to N != 0, progress is printed every N iterations.
-
unsigned max_no_progress = 10#
Maximum number of iterations without any progress before giving up.
-
bool full_flush_on_γ_change = true#
-
struct GAAPGAProgressInfo#
- #include <alpaqa/inner/guarded-aa-pga.hpp>
-
class GAAPGASolver#
- #include <alpaqa/inner/guarded-aa-pga.hpp>
Guarded Anderson Accelerated Proximal Gradient Algorithm.
Vien V. Mai and Mikael Johansson, Anderson Acceleration of Proximal Gradient Methods. https://arxiv.org/abs/1910.08590v2
-
template<class InnerSolverStats>
struct InnerStatsAccumulator#
-
template<>
struct InnerStatsAccumulator<GAAPGASolver::Stats>#
- #include <alpaqa/inner/guarded-aa-pga.hpp>
-
template<>
struct InnerStatsAccumulator<LBFGSBStats># - #include <alpaqa/inner/lbfgspp.hpp>
-
template<>
struct InnerStatsAccumulator<PANOCStats># - #include <alpaqa/inner/decl/panoc.hpp>
-
template<>
struct InnerStatsAccumulator<PGASolver::Stats>#
- #include <alpaqa/inner/pga.hpp>
Public Members
-
std::chrono::microseconds elapsed_time
-
unsigned iterations = 0
-
template<>
struct InnerStatsAccumulator<SecondOrderPANOCSolver::Stats>#
- #include <alpaqa/inner/decl/second-order-panoc.hpp>
-
template<>
struct InnerStatsAccumulator<StructuredPANOCLBFGSStats># - #include <alpaqa/inner/decl/structured-panoc-lbfgs.hpp>
Public Members
-
std::chrono::microseconds elapsed_time#
Total elapsed time in the inner solver.
-
unsigned iterations = 0#
Total number of inner PANOC iterations.
-
unsigned linesearch_failures = 0#
Total number of PANOC line search failures.
-
unsigned lbfgs_failures = 0#
Total number of times that the L-BFGS direction was not finite.
-
unsigned lbfgs_rejected = 0#
Total number of times that the L-BFGS update was rejected (i.e.
it could have resulted in a non-positive definite Hessian estimate).
-
unsigned τ_1_accepted = 0#
Total number of times that a line search parameter of \( \tau = 1 \) was accepted (i.e.
no backtracking necessary).
-
unsigned count_τ = 0#
The total number of line searches performed (used for computing the average value of \( \tau \)).
-
class LBFGS#
- #include <alpaqa/inner/directions/decl/lbfgs.hpp>
Limited memory Broyden–Fletcher–Goldfarb–Shanno (L-BFGS) algorithm.
Public Types
-
enum class Sign#
The sign of the vectors \( p \) passed to the LBFGS::update method.
Values:
-
enumerator Positive#
\( p \sim \nabla \psi(x) \)
-
enumerator Negative#
\( p \sim -\nabla \psi(x) \)
-
using Params = LBFGSParams#
Public Functions
-
inline bool update(crvec xₖ, crvec xₖ₊₁, crvec pₖ, crvec pₖ₊₁, Sign sign, bool forced = false)#
Update the inverse Hessian approximation using the new vectors xₖ₊₁ and pₖ₊₁.
-
template<class Vec>
bool apply(Vec &&q, real_t γ)# Apply the inverse Hessian approximation to the given vector q.
-
template<class Vec, class IndexVec>
bool apply(Vec &&q, real_t γ, const IndexVec &J)# Apply the inverse Hessian approximation to the given vector q, applying only the columns and rows of the Hessian in the index set J.
-
inline void reset()#
Throw away the approximation and all previous vectors s and y.
-
inline void resize(size_t n)#
Re-allocate storage for a problem with a different size.
Causes a reset.
-
inline std::string get_name() const#
-
inline size_t n() const#
Get the size of the s and y vectors in the buffer.
-
inline size_t history() const#
Get the number of previous vectors s and y stored in the buffer.
-
inline size_t succ(size_t i) const#
Get the next index in the circular buffer of previous s and y vectors.
-
inline auto s(size_t i)#
-
inline auto s(size_t i) const#
-
inline auto y(size_t i)#
-
inline auto y(size_t i) const#
Public Static Functions
-
static inline bool update_valid(LBFGSParams params, real_t yᵀs, real_t sᵀs, real_t pᵀp)#
Check if the new vectors s and y allow for a valid BFGS update that preserves the positive definiteness of the Hessian approximation.
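A short usage sketch of the documented update/apply cycle (the surrounding quasi-Newton driver is omitted; xk1 denotes xₖ₊₁, pk1 denotes pₖ₊₁).
#include <alpaqa/inner/directions/decl/lbfgs.hpp>

// Store the newest (s, y) pair and apply the inverse Hessian approximation to q
// in place, with initial scaling H0 = gamma * I.
void lbfgs_step(alpaqa::LBFGS &lbfgs, alpaqa::crvec xk, alpaqa::crvec xk1,
                alpaqa::crvec pk, alpaqa::crvec pk1, alpaqa::rvec q,
                alpaqa::real_t gamma) {
    lbfgs.update(xk, xk1, pk, pk1, alpaqa::LBFGS::Sign::Positive); // p ~ ∇ψ(x)
    lbfgs.apply(q, gamma);                                         // q ← H q
}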
-
template<template<class> class LineSearchT = LBFGSpp::LineSearchBacktracking>
class LBFGSBSolver# - #include <alpaqa/inner/lbfgspp.hpp>
Box-constrained LBFGS solver for ALM.
-
struct LBFGSBStats#
- #include <alpaqa/inner/lbfgspp.hpp>
Public Members
-
SolverStatus status = SolverStatus::Unknown#
-
std::chrono::microseconds elapsed_time#
-
unsigned iterations = 0#
-
struct LBFGSParams#
- #include <alpaqa/inner/directions/decl/lbfgs.hpp>
Parameters for the LBFGS and SpecializedLBFGS classes.
Public Members
-
unsigned memory = 10#
Length of the history to keep.
-
struct alpaqa::LBFGSParams::[anonymous] cbfgs#
Parameters in the cautious BFGS update condition.
\[ \frac{y^\top s}{s^\top s} \ge \epsilon \| g \|^\alpha \]
-
bool rescale_when_γ_changes = false#
-
template<template<class> class LineSearchT = LBFGSpp::LineSearchBacktracking>
class LBFGSSolver# - #include <alpaqa/inner/lbfgspp.hpp>
Unconstrained LBFGS solver for ALM.
Public Functions
-
inline Stats operator()(const Problem &problem, crvec Σ, real_t ε, bool always_overwrite_results, rvec x, rvec y, rvec err_z)#
-
inline std::string get_name() const#
-
inline void stop()#
-
struct Stats#
- #include <alpaqa/inner/lbfgspp.hpp>
-
class LimitedMemoryQR#
- #include <alpaqa/inner/detail/limited-memory-qr.hpp>
Incremental QR factorization using modified Gram-Schmidt with reorthogonalization.
Computes A = QR while allowing efficient removal of the first column of A or adding new columns at the end of A.
Public Types
Public Functions
-
LimitedMemoryQR() = default#
-
inline LimitedMemoryQR(size_t n, size_t m)#
The maximum dimensions of Q are n×m and the maximum dimensions of R are m×m.
- Parameters
n – The size of the vectors, the number of rows of A.
m – The maximum number of columns of A.
-
inline size_t n() const#
-
inline size_t m() const#
-
inline void remove_column()#
Remove the leftmost column.
-
template<class VecB, class VecX>
inline void solve_col(const VecB &b, VecX &x) const# Solve the least squares problem Ax = b.
-
template<class MatB, class MatX>
inline void solve(const MatB &B, MatX &X) const# Solve the least squares problem AX = B.
-
template<class Derived>
inline solve_ret_t<Derived> solve(const Eigen::DenseBase<Derived> &B)# Solve the least squares problem AX = B.
-
inline const mat &get_raw_R() const#
Get the full, raw storage for the upper triangular matrix R.
The columns of this matrix are permuted because it’s stored as a circular buffer for efficiently appending columns to the end and popping columns from the front.
-
inline mat get_full_R() const#
Get the full storage for the upper triangular matrix R but with the columns in the correct order.
Note
Meant for tests only, creates a permuted copy.
-
inline mat get_R() const#
Get the matrix R such that Q times R is the original matrix.
Note
Meant for tests only, creates a permuted copy.
-
inline mat get_Q() const#
Get the matrix Q such that Q times R is the original matrix.
Note
Meant for tests only, creates a copy.
-
inline size_t get_reorth_count() const#
Get the number of MGS reorthogonalizations.
-
inline void clear_reorth_count()#
Reset the number of MGS reorthogonalizations.
-
inline void reset()#
Reset all indices, clearing the Q and R matrices.
-
inline void resize(size_t n, size_t m)#
Re-allocate storage for a problem with a different size.
Causes a reset.
-
inline size_t num_columns() const#
Get the number of columns that are currently stored.
-
inline size_t ring_head() const#
Get the head index of the circular buffer (points to the oldest element).
-
inline size_t ring_tail() const#
Get the tail index of the circular buffer (points to one past the most recent element).
-
inline size_t ring_next(size_t i) const#
Get the next index in the circular buffer.
-
inline size_t ring_prev(size_t i) const#
Get the previous index in the circular buffer.
-
inline CircularRange<size_t> ring_iter() const#
Get iterators in the circular buffer.
-
inline ReverseCircularRange<size_t> ring_reverse_iter() const#
Get reverse iterators in the circular buffer.
-
struct LipschitzEstimateParams#
- #include <alpaqa/util/lipschitz.hpp>
Public Members
-
template<class DirectionProviderT>
struct PANOCDirection# - #include <alpaqa/inner/directions/decl/panoc-direction-update.hpp>
Public Static Functions
-
static void initialize(DirectionProviderT &dp, crvec x₀, crvec x̂₀, crvec p₀, crvec grad₀) = delete#
-
static bool update(DirectionProviderT &dp, crvec xₖ, crvec xₖ₊₁, crvec pₖ, crvec pₖ₊₁, crvec gradₖ₊₁, const Box &C, real_t γₖ₊₁) = delete#
-
static bool apply(DirectionProviderT &dp, crvec xₖ, crvec x̂ₖ, crvec pₖ, real_t γ, rvec qₖ) = delete#
Apply the direction estimation in the current point.
- Parameters
dp – [in] Direction provider (e.g. LBFGS, Anderson Acceleration).
xₖ – [in] Current iterate.
x̂ₖ – [in] Result of proximal gradient step in current iterate.
pₖ – [in] Proximal gradient step between x̂ₖ and xₖ.
γ – [in] H₀ = γI for L-BFGS
qₖ – [out] Resulting step.
-
static void changed_γ(DirectionProviderT &dp, real_t γₖ, real_t old_γₖ) = delete#
-
template<>
struct PANOCDirection<AndersonAccel># - #include <alpaqa/inner/directions/anderson-acceleration.hpp>
-
template<>
struct PANOCDirection<SpecializedLBFGS># - #include <alpaqa/inner/directions/specialized-lbfgs.hpp>
Public Static Functions
-
static inline bool update(SpecializedLBFGS &lbfgs, crvec xₖ, crvec xₖ₊₁, crvec pₖ, crvec pₖ₊₁, crvec gradₖ₊₁, const Box &C, real_t γ)#
-
static inline bool apply(SpecializedLBFGS &lbfgs, crvec xₖ, crvec x̂ₖ, crvec pₖ, real_t γ, rvec qₖ)#
-
static inline void changed_γ(SpecializedLBFGS &lbfgs, real_t γₖ, real_t old_γₖ)#
-
struct PANOCParams#
- #include <alpaqa/inner/decl/panoc.hpp>
Tuning parameters for the PANOC algorithm.
Public Members
-
LipschitzEstimateParams Lipschitz#
Parameters related to the Lipschitz constant estimate and step size.
-
unsigned max_iter = 100#
Maximum number of inner PANOC iterations.
-
std::chrono::microseconds max_time = std::chrono::minutes(5)#
Maximum duration.
-
PANOCStopCrit stop_crit = PANOCStopCrit::ApproxKKT#
What stopping criterion to use.
-
unsigned max_no_progress = 10#
Maximum number of iterations without any progress before giving up.
-
unsigned print_interval = 0#
When to print progress.
If set to zero, nothing will be printed. If set to N != 0, progress is printed every N iterations.
-
bool update_lipschitz_in_linesearch = true#
-
bool alternative_linesearch_cond = false#
-
LBFGSStepSize lbfgs_stepsize = LBFGSStepSize::BasedOnCurvature#
-
struct PANOCProgressInfo#
- #include <alpaqa/inner/decl/panoc.hpp>
-
template<class DirectionProviderT>
class PANOCSolver# - #include <alpaqa/inner/decl/panoc.hpp>
PANOC solver for ALM.
Public Types
-
using Params = PANOCParams#
-
using DirectionProvider = DirectionProviderT#
-
using Stats = PANOCStats#
-
using ProgressInfo = PANOCProgressInfo#
Public Functions
-
inline PANOCSolver(Params params, PANOCDirection<DirectionProvider> &&direction_provider)#
-
inline PANOCSolver(Params params, const PANOCDirection<DirectionProvider> &direction_provider)#
-
Stats operator()(const Problem &problem, crvec Σ, real_t ε, bool always_overwrite_results, rvec x, rvec y, rvec err_z)#
- Parameters
problem – [in] Problem description
Σ – [in] Constraint weights \( \Sigma \)
ε – [in] Tolerance \( \varepsilon \)
always_overwrite_results – [in] Overwrite x, y and err_z even if not converged
x – [inout] Decision variable \( x \)
y – [inout] Lagrange multipliers \( y \)
err_z – [out] Slack variable error \( g(x) - z \)
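A hedged sketch of calling the solver directly with the signature above; alpaqa::vec as the dense vector type is an assumption, and the solver is assumed to be constructed elsewhere.
#include <alpaqa/inner/decl/panoc.hpp>

alpaqa::PANOCStats run_panoc(alpaqa::PANOCSolver<alpaqa::LBFGS> &solver,
                             const alpaqa::Problem &problem, alpaqa::rvec x) {
    alpaqa::vec Sigma = alpaqa::vec::Ones(problem.m);  // constraint weights Σ
    alpaqa::vec y     = alpaqa::vec::Zero(problem.m);  // Lagrange multipliers y
    alpaqa::vec err_z(problem.m);                      // slack error g(x) − z
    alpaqa::real_t eps = 1e-8;                         // tolerance ε
    return solver(problem, Sigma, eps, /*always_overwrite_results=*/true, x, y, err_z);
}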
-
inline PANOCSolver &set_progress_callback(std::function<void(const ProgressInfo&)> cb)#
-
std::string get_name() const#
-
inline void stop()#
Public Members
-
PANOCDirection<DirectionProvider> direction_provider#
-
struct PANOCStats#
- #include <alpaqa/inner/decl/panoc.hpp>
-
class ParamWrapper#
- #include <alpaqa/util/problem.hpp>
Subclassed by CasADiParamWrapper
Public Functions
-
inline ParamWrapper(unsigned p)#
-
virtual ~ParamWrapper() = default#
-
virtual std::shared_ptr<ParamWrapper> clone() const = 0#
-
struct PGAParams#
- #include <alpaqa/inner/pga.hpp>
Public Members
-
LipschitzEstimateParams Lipschitz#
Parameters related to the Lipschitz constant estimate and step size.
-
unsigned max_iter = 100#
Maximum number of inner iterations.
-
std::chrono::microseconds max_time = std::chrono::minutes(5)#
Maximum duration.
-
PANOCStopCrit stop_crit = PANOCStopCrit::ApproxKKT#
What stopping criterion to use.
-
unsigned print_interval = 0#
When to print progress.
If set to zero, nothing will be printed. If set to N != 0, progress is printed every N iterations.
-
struct PGAProgressInfo#
- #include <alpaqa/inner/pga.hpp>
-
class PGASolver#
- #include <alpaqa/inner/pga.hpp>
Standard Proximal Gradient Algorithm without any bells and whistles.
Public Functions
-
inline Stats operator()(const Problem &problem, crvec Σ, real_t ε, bool always_overwrite_results, rvec x, rvec λ, rvec err_z)#
-
inline PGASolver &set_progress_callback(std::function<void(const ProgressInfo&)> cb)#
-
inline std::string get_name() const#
-
inline void stop()#
-
struct Stats#
- #include <alpaqa/inner/pga.hpp>
Public Members
-
SolverStatus status = SolverStatus::Unknown#
-
std::chrono::microseconds elapsed_time#
-
unsigned iterations = 0#
-
class Problem#
- #include <alpaqa/util/problem.hpp>
Problem description for minimization problems.
\[\begin{split} \begin{aligned} & \underset{x}{\text{minimize}} & & f(x) &&&& f : \mathbb{R}^n \rightarrow \mathbb{R} \\ & \text{subject to} & & \underline{x} \le x \le \overline{x} \\ &&& \underline{z} \le g(x) \le \overline{z} &&&& g : \mathbb{R}^n \rightarrow \mathbb{R}^m \end{aligned} \end{split}\]Subclassed by ProblemOnlyD, ProblemWithParam
Public Types
-
using f_sig = real_t(crvec x)#
Signature of the function that evaluates the cost \( f(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
-
using grad_f_sig = void(crvec x, rvec grad_fx)#
Signature of the function that evaluates the gradient of the cost function \( \nabla f(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param grad_fx
[out] Gradient of cost function \( \nabla f(x) \in \mathbb{R}^n \)
-
using g_sig = void(crvec x, rvec gx)#
Signature of the function that evaluates the constraints \( g(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param gx
[out] Value of the constraints \( g(x) \in \mathbb{R}^m \)
-
using grad_g_prod_sig = void(crvec x, crvec y, rvec grad_gxy)#
Signature of the function that evaluates the gradient of the constraints times a vector \( \nabla g(x)\ y \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Vector \( y \in \mathbb{R}^m \) to multiply the gradient by
- Param grad_gxy
[out] Gradient of the constraints \( \nabla g(x)\ y \in \mathbb{R}^n \)
-
using grad_gi_sig = void(crvec x, unsigned i, rvec grad_gi)#
Signature of the function that evaluates the gradient of one specific constraint \( \nabla g_i(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param i
[in] Which constraint \( 0 \le i \lt m \)
- Param grad_gi
[out] Gradient of the constraint \( \nabla g_i(x) \in \mathbb{R}^n \)
-
using hess_L_prod_sig = void(crvec x, crvec y, crvec v, rvec Hv)#
Signature of the function that evaluates the Hessian of the Lagrangian multiplied by a vector \( \nabla_{xx}^2L(x, y)\ v \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Lagrange multipliers \( y \in \mathbb{R}^m \)
- Param v
[in] Vector to multiply by \( v \in \mathbb{R}^n \)
- Param Hv
[out] Hessian-vector product \( \nabla_{xx}^2 L(x, y)\ v \in \mathbb{R}^{n} \)
-
using hess_L_sig = void(crvec x, crvec y, rmat H)#
Signature of the function that evaluates the Hessian of the Lagrangian \( \nabla_{xx}^2L(x, y) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Lagrange multipliers \( y \in \mathbb{R}^m \)
- Param H
[out] Hessian \( \nabla_{xx}^2 L(x, y) \in \mathbb{R}^{n\times n} \)
Public Functions
-
Problem() = default#
-
inline Problem(unsigned int n, unsigned int m)#
-
inline Problem(unsigned n, unsigned int m, Box C, Box D, std::function<f_sig> f, std::function<grad_f_sig> grad_f, std::function<g_sig> g, std::function<grad_g_prod_sig> grad_g_prod, std::function<grad_gi_sig> grad_gi, std::function<hess_L_prod_sig> hess_L_prod, std::function<hess_L_sig> hess_L)#
Public Members
-
unsigned int n#
Number of decision variables, dimension of x.
-
unsigned int m#
Number of constraints, dimension of g(x) and z.
-
std::function<grad_f_sig> grad_f#
Gradient of the cost function \( \nabla f(x) \).
-
std::function<grad_g_prod_sig> grad_g_prod#
Gradient of the constraint function times vector \( \nabla g(x)\ y \).
-
std::function<grad_gi_sig> grad_gi#
Gradient of a specific constraint \( \nabla g_i(x) \).
-
std::function<hess_L_prod_sig> hess_L_prod#
Hessian of the Lagrangian function times vector \( \nabla_{xx}^2 L(x, y)\ v \).
-
std::function<hess_L_sig> hess_L#
Hessian of the Lagrangian function \( \nabla_{xx}^2 L(x, y) \).
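A hedged construction sketch using the constructor and the std::function members listed above; the members f, g, C and D and the Box fields lowerbound/upperbound do not appear in this extract and are assumptions. It sets up minimize x₀² + x₁² subject to −1 ≤ x ≤ 1 and 0 ≤ x₀ + x₁ ≤ 1 (second-order functions omitted).
#include <alpaqa/util/problem.hpp>

alpaqa::Problem make_example_problem() {
    alpaqa::Problem p(2, 1);                            // n = 2, m = 1
    p.C.lowerbound = alpaqa::vec::Constant(2, -1);      // assumed Box members
    p.C.upperbound = alpaqa::vec::Constant(2, +1);
    p.D.lowerbound = alpaqa::vec::Constant(1, 0);
    p.D.upperbound = alpaqa::vec::Constant(1, 1);
    p.f      = [](alpaqa::crvec x) { return x.squaredNorm(); };   // assumed member f
    p.grad_f = [](alpaqa::crvec x, alpaqa::rvec g) { g = 2 * x; };
    p.g      = [](alpaqa::crvec x, alpaqa::rvec gx) { gx(0) = x.sum(); };  // assumed member g
    p.grad_g_prod = [](alpaqa::crvec, alpaqa::crvec y, alpaqa::rvec grad) {
        grad = alpaqa::vec::Constant(2, y(0));          // ∇g(x) y = [1, 1]ᵀ y₀
    };
    return p;
}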
-
class ProblemOnlyD : public Problem#
- #include <alpaqa/util/problem.hpp>
Moves the state constraints in the set C to the set D, resulting in an unconstrained inner problem.
The new constraints function g becomes the concatenation of the original g function and the identity function. The new set D is the Cartesian product of the original D × C.
Public Types
-
using f_sig = real_t(crvec x)#
Signature of the function that evaluates the cost \( f(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
-
using grad_f_sig = void(crvec x, rvec grad_fx)#
Signature of the function that evaluates the gradient of the cost function \( \nabla f(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param grad_fx
[out] Gradient of cost function \( \nabla f(x) \in \mathbb{R}^n \)
-
using g_sig = void(crvec x, rvec gx)#
Signature of the function that evaluates the constraints \( g(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param gx
[out] Value of the constraints \( g(x) \in \mathbb{R}^m \)
-
using grad_g_prod_sig = void(crvec x, crvec y, rvec grad_gxy)#
Signature of the function that evaluates the gradient of the constraints times a vector \( \nabla g(x)\ y \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Vector \( y \in \mathbb{R}^m \) to multiply the gradient by
- Param grad_gxy
[out] Gradient of the constraints \( \nabla g(x)\ y \in \mathbb{R}^n \)
-
using grad_gi_sig = void(crvec x, unsigned i, rvec grad_gi)#
Signature of the function that evaluates the gradient of one specific constraint \( \nabla g_i(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param i
[in] Which constraint \( 0 \le i \lt m \)
- Param grad_gi
[out] Gradient of the constraint \( \nabla g_i(x) \in \mathbb{R}^n \)
-
using hess_L_prod_sig = void(crvec x, crvec y, crvec v, rvec Hv)#
Signature of the function that evaluates the Hessian of the Lagrangian multiplied by a vector \( \nabla_{xx}^2L(x, y)\ v \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Lagrange multipliers \( y \in \mathbb{R}^m \)
- Param v
[in] Vector to multiply by \( v \in \mathbb{R}^n \)
- Param Hv
[out] Hessian-vector product \( \nabla_{xx}^2 L(x, y)\ v \in \mathbb{R}^{n} \)
-
using hess_L_sig = void(crvec x, crvec y, rmat H)#
Signature of the function that evaluates the Hessian of the Lagrangian \( \nabla_{xx}^2L(x, y) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Lagrange multipliers \( y \in \mathbb{R}^m \)
- Param H
[out] Hessian \( \nabla_{xx}^2 L(x, y) \in \mathbb{R}^{n\times n} \)
Public Members
-
unsigned int n#
Number of decision variables, dimension of x.
-
unsigned int m#
Number of constraints, dimension of g(x) and z.
-
std::function<grad_f_sig> grad_f#
Gradient of the cost function \( \nabla f(x) \).
-
std::function<grad_g_prod_sig> grad_g_prod#
Gradient of the constraint function times vector \( \nabla g(x)\ y \).
-
std::function<grad_gi_sig> grad_gi#
Gradient of a specific constraint \( \nabla g_i(x) \).
-
std::function<hess_L_prod_sig> hess_L_prod#
Hessian of the Lagrangian function times vector \( \nabla_{xx}^2 L(x, y)\ v \).
-
std::function<hess_L_sig> hess_L#
Hessian of the Lagrangian function \( \nabla_{xx}^2 L(x, y) \).
-
template<class ProblemT>
class ProblemWithCounters : public ProblemT# - #include <alpaqa/util/problem.hpp>
Public Functions
-
ProblemWithCounters(const ProblemWithCounters&) = delete#
-
ProblemWithCounters &operator=(const ProblemWithCounters&) = delete#
-
ProblemWithCounters(ProblemWithCounters&&) = default#
-
ProblemWithCounters &operator=(ProblemWithCounters&&) = default#
Public Members
-
std::shared_ptr<EvalCounter> evaluations = std::make_shared<EvalCounter>()#
-
class ProblemWithParam : public Problem#
- #include <alpaqa/util/problem.hpp>
Public Types
-
using f_sig = real_t(crvec x)#
Signature of the function that evaluates the cost \( f(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
-
using grad_f_sig = void(crvec x, rvec grad_fx)#
Signature of the function that evaluates the gradient of the cost function \( \nabla f(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param grad_fx
[out] Gradient of cost function \( \nabla f(x) \in \mathbb{R}^n \)
-
using g_sig = void(crvec x, rvec gx)#
Signature of the function that evaluates the constraints \( g(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param gx
[out] Value of the constraints \( g(x) \in \mathbb{R}^m \)
-
using grad_g_prod_sig = void(crvec x, crvec y, rvec grad_gxy)#
Signature of the function that evaluates the gradient of the constraints times a vector \( \nabla g(x)\ y \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Vector \( y \in \mathbb{R}^m \) to multiply the gradient by
- Param grad_gxy
[out] Gradient of the constraints \( \nabla g(x)\ y \in \mathbb{R}^n \)
-
using grad_gi_sig = void(crvec x, unsigned i, rvec grad_gi)#
Signature of the function that evaluates the gradient of one specific constraint \( \nabla g_i(x) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param i
[in] Which constraint \( 0 \le i \lt m \)
- Param grad_gi
[out] Gradient of the constraint \( \nabla g_i(x) \in \mathbb{R}^n \)
-
using hess_L_prod_sig = void(crvec x, crvec y, crvec v, rvec Hv)#
Signature of the function that evaluates the Hessian of the Lagrangian multiplied by a vector \( \nabla_{xx}^2L(x, y)\ v \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Lagrange multipliers \( y \in \mathbb{R}^m \)
- Param v
[in] Vector to multiply by \( v \in \mathbb{R}^n \)
- Param Hv
[out] Hessian-vector product \( \nabla_{xx}^2 L(x, y)\ v \in \mathbb{R}^{n} \)
-
using hess_L_sig = void(crvec x, crvec y, rmat H)#
Signature of the function that evaluates the Hessian of the Lagrangian \( \nabla_{xx}^2L(x, y) \).
- Param x
[in] Decision variable \( x \in \mathbb{R}^n \)
- Param y
[in] Lagrange multipliers \( y \in \mathbb{R}^m \)
- Param H
[out] Hessian \( \nabla_{xx}^2 L(x, y) \in \mathbb{R}^{n\times n} \)
Public Functions
-
inline explicit ProblemWithParam(const ProblemWithParam &o)#
-
inline ProblemWithParam &operator=(const ProblemWithParam &o)#
-
ProblemWithParam(ProblemWithParam&&) = default#
-
ProblemWithParam &operator=(ProblemWithParam&&) = default#
-
Problem() = default#
-
inline Problem(unsigned int n, unsigned int m)#
-
inline Problem(unsigned n, unsigned int m, Box C, Box D, std::function<f_sig> f, std::function<grad_f_sig> grad_f, std::function<g_sig> g, std::function<grad_g_prod_sig> grad_g_prod, std::function<grad_gi_sig> grad_gi, std::function<hess_L_prod_sig> hess_L_prod, std::function<hess_L_sig> hess_L)#
Public Members
-
std::shared_ptr<ParamWrapper> wrapper#
-
unsigned int n#
Number of decision variables, dimension of x.
-
unsigned int m#
Number of constraints, dimension of g(x) and z.
-
std::function<grad_f_sig> grad_f#
Gradient of the cost function \( \nabla f(x) \).
-
std::function<grad_g_prod_sig> grad_g_prod#
Gradient of the constraint function times vector \( \nabla g(x)\ y \).
-
std::function<grad_gi_sig> grad_gi#
Gradient of a specific constraint \( \nabla g_i(x) \).
-
std::function<hess_L_prod_sig> hess_L_prod#
Hessian of the Lagrangian function times vector \( \nabla_{xx}^2 L(x, y)\ v \).
-
std::function<hess_L_sig> hess_L#
Hessian of the Lagrangian function \( \nabla_{xx}^2 L(x, y) \).
-
template<class IndexT = size_t>
struct ReverseCircularIndexIterator# - #include <alpaqa/util/ringbuffer.hpp>
Public Types
-
using ForwardIterator = CircularIndexIterator<IndexT>#
-
using Index = typename ForwardIterator::Index#
-
using Indices = typename ForwardIterator::Indices#
-
using reference = value_type#
-
using difference_type = std::ptrdiff_t#
-
using pointer = void#
-
using iterator_category = std::input_iterator_tag#
Public Functions
-
inline ReverseCircularIndexIterator()#
-
inline ReverseCircularIndexIterator(ForwardIterator forwardit)#
-
inline ReverseCircularIndexIterator &operator++()#
-
inline ReverseCircularIndexIterator &operator--()#
-
inline ReverseCircularIndexIterator operator++(int)#
-
inline ReverseCircularIndexIterator operator--(int)#
Public Members
-
ForwardIterator forwardit#
-
template<class IndexT>
bool operator==(ReverseCircularIndexIterator<IndexT> a, ReverseCircularIndexIterator<IndexT> b)# Note
Only valid for two indices in the same range.
-
template<class IndexT>
bool operator!=(ReverseCircularIndexIterator<IndexT> a, ReverseCircularIndexIterator<IndexT> b)# Note
Only valid for two indices in the same range.
-
template<class IndexT>
class ReverseCircularRange# - #include <alpaqa/util/ringbuffer.hpp>
Public Types
-
using ForwardRange = CircularRange<IndexT>#
-
using Index = typename ForwardRange::Index#
-
using Indices = typename ForwardRange::Indices#
-
using const_iterator = typename ForwardRange::const_reverse_iterator#
-
using iterator = typename ForwardRange::reverse_iterator#
-
using const_reverse_iterator = typename ForwardRange::const_iterator#
-
using reverse_iterator = typename ForwardRange::iterator#
Public Functions
-
inline ReverseCircularRange(const ForwardRange &forwardrange)#
-
inline const_iterator cbegin() const#
-
inline const_iterator cend() const#
-
inline reverse_iterator rbegin() const#
-
inline reverse_iterator rend() const#
-
inline const_reverse_iterator crbegin() const#
-
inline const_reverse_iterator crend() const#
-
struct SecondOrderPANOCParams#
- #include <alpaqa/inner/decl/second-order-panoc.hpp>
Tuning parameters for the second order PANOC algorithm.
Public Members
-
LipschitzEstimateParams Lipschitz#
Parameters related to the Lipschitz constant estimate and step size.
-
unsigned max_iter = 100#
Maximum number of inner PANOC iterations.
-
std::chrono::microseconds max_time = std::chrono::minutes(5)#
Maximum duration.
-
PANOCStopCrit stop_crit = PANOCStopCrit::ApproxKKT#
What stopping criterion to use.
-
unsigned max_no_progress = 10#
Maximum number of iterations without any progress before giving up.
-
unsigned print_interval = 0#
When to print progress.
If set to zero, nothing will be printed. If set to N != 0, progress is printed every N iterations.
-
bool update_lipschitz_in_linesearch = true#
-
bool alternative_linesearch_cond = false#
-
class SecondOrderPANOCSolver#
- #include <alpaqa/inner/decl/second-order-panoc.hpp>
Second order PANOC solver for ALM.
Public Types
-
using Params = SecondOrderPANOCParams#
Public Functions
-
inline Stats operator()(const Problem &problem, crvec Σ, real_t ε, bool always_overwrite_results, rvec x, rvec y, rvec err_z)#
- Parameters
problem – [in] Problem description
Σ – [in] Constraint weights \( \Sigma \)
ε – [in] Tolerance \( \varepsilon \)
always_overwrite_results – [in] Overwrite x, y and err_z even if not converged
x – [inout] Decision variable \( x \)
y – [inout] Lagrange multipliers \( y \)
err_z – [out] Slack variable error \( g(x) - z \)
-
inline SecondOrderPANOCSolver &set_progress_callback(std::function<void(const ProgressInfo&)> cb)#
-
inline std::string get_name() const#
-
inline void stop()#
-
struct ProgressInfo#
- #include <alpaqa/inner/decl/second-order-panoc.hpp>
-
struct Stats#
- #include <alpaqa/inner/decl/second-order-panoc.hpp>
-
class SpecializedLBFGS#
- #include <alpaqa/inner/directions/decl/specialized-lbfgs.hpp>
Limited memory Broyden–Fletcher–Goldfarb–Shanno (L-BFGS) algorithm that can handle updates of the γ parameter.
Public Types
-
using Params = LBFGSParams#
Public Functions
-
inline bool standard_update(crvec xₖ, crvec xₖ₊₁, crvec pₖ, crvec pₖ₊₁, crvec gradₖ₊₁)#
Standard L-BFGS update without changing the step size γ.
-
inline bool full_update(crvec xₖ, crvec xₖ₊₁, crvec pₖ_old_γ, crvec pₖ₊₁, crvec gradₖ₊₁, const Box &C, real_t γ)#
L-BFGS update when changing the step size γ, recomputing everything.
-
inline bool update(crvec xₖ, crvec xₖ₊₁, crvec pₖ, crvec pₖ₊₁, crvec gradₖ₊₁, const Box &C, real_t γ)#
Update the inverse Hessian approximation using the new vectors xₖ₊₁ and pₖ₊₁.
-
template<class Vec>
void apply(Vec &&q)# Apply the inverse Hessian approximation to the given vector q.
-
inline void initialize(crvec x₀, crvec grad₀)#
Initialize with the starting point x₀ and the gradient in that point.
-
inline void reset()#
Throw away the approximation and all previous vectors s and y.
-
inline void resize(size_t n, size_t history)#
Re-allocate storage for a problem with a different size.
-
inline std::string get_name() const#
-
inline size_t n() const#
Get the size of the s, y, x and g vectors in the buffer.
-
inline size_t history() const#
Get the number of previous vectors s, y, x and g stored in the buffer.
-
inline size_t succ(size_t i) const#
Get the next index in the circular buffer of previous s, y, x and g vectors.
-
inline size_t pred(size_t i) const#
Get the previous index in the circular buffer of previous s, y, x and g vectors.
-
inline auto s(size_t i)#
-
inline auto s(size_t i) const#
-
inline auto y(size_t i)#
-
inline auto y(size_t i) const#
-
inline auto x(size_t i)#
-
inline auto x(size_t i) const#
-
inline auto g(size_t i)#
-
inline auto g(size_t i) const#
-
inline auto p()#
-
inline auto p() const#
-
inline auto w()#
-
inline auto w() const#
-
struct StructuredPANOCLBFGSParams#
- #include <alpaqa/inner/decl/structured-panoc-lbfgs.hpp>
Tuning parameters for the second order PANOC algorithm.
Public Members
-
LipschitzEstimateParams Lipschitz#
Parameters related to the Lipschitz constant estimate and step size.
-
unsigned max_iter = 100#
Maximum number of inner PANOC iterations.
-
std::chrono::microseconds max_time = std::chrono::minutes(5)#
Maximum duration.
-
real_t nonmonotone_linesearch = 0#
Factor used in update for exponentially weighted nonmonotone line search.
Zero means monotone line search.
-
PANOCStopCrit stop_crit = PANOCStopCrit::ApproxKKT#
What stopping criterion to use.
-
unsigned max_no_progress = 10#
Maximum number of iterations without any progress before giving up.
-
unsigned print_interval = 0#
When to print progress.
If set to zero, nothing will be printed. If set to N != 0, progress is printed every N iterations.
-
bool update_lipschitz_in_linesearch = true#
-
bool alternative_linesearch_cond = false#
-
bool hessian_vec = true#
-
bool hessian_vec_finite_differences = true#
-
bool full_augmented_hessian = true#
-
unsigned hessian_step_size_heuristic = 0#
-
LBFGSStepSize lbfgs_stepsize = LBFGSStepSize::BasedOnCurvature#
-
struct StructuredPANOCLBFGSProgressInfo#
- #include <alpaqa/inner/decl/structured-panoc-lbfgs.hpp>
-
class StructuredPANOCLBFGSSolver#
- #include <alpaqa/inner/decl/structured-panoc-lbfgs.hpp>
Second order PANOC solver for ALM.
Public Types
-
using Params = StructuredPANOCLBFGSParams#
-
using Stats = StructuredPANOCLBFGSStats#
-
using ProgressInfo = StructuredPANOCLBFGSProgressInfo#
Public Functions
-
inline StructuredPANOCLBFGSSolver(Params params, LBFGSParams lbfgsparams)#
-
inline Stats operator()(const Problem &problem, crvec Σ, real_t ε, bool always_overwrite_results, rvec x, rvec y, rvec err_z)#
- Parameters
problem – [in] Problem description
Σ – [in] Constraint weights \( \Sigma \)
ε – [in] Tolerance \( \varepsilon \)
always_overwrite_results – [in] Overwrite x, y and err_z even if not converged
x – [inout] Decision variable \( x \)
y – [inout] Lagrange multipliers \( y \)
err_z – [out] Slack variable error \( g(x) - z \)
-
inline StructuredPANOCLBFGSSolver &set_progress_callback(std::function<void(const ProgressInfo&)> cb)#
-
inline std::string get_name() const#
-
inline void stop()#
-
struct StructuredPANOCLBFGSStats#
- #include <alpaqa/inner/decl/structured-panoc-lbfgs.hpp>
-
class vec_allocator#
- #include <alpaqa/util/alloc.hpp>
Public Functions
-
inline vec_allocator(size_t num_vec, Eigen::Index n)#
-
vec_allocator(const vec_allocator&) = delete#
-
vec_allocator(vec_allocator&&) = delete#
-
vec_allocator &operator=(const vec_allocator&) = delete#
-
vec_allocator &operator=(vec_allocator&&) = delete#
-
inline auto alloc()#
-
inline alloc_raii_wrapper alloc_raii()#
-
inline size_t size() const#
-
inline size_t used_space() const#
-
inline Eigen::Index vector_size() const#
-
inline size_t highwater() const#
-
struct alloc_raii_wrapper#
- #include <alpaqa/util/alloc.hpp>
Public Functions
-
inline alloc_raii_wrapper(real_t *dptr, Eigen::Index n, vec_allocator *alloc)#
-
inline alloc_raii_wrapper(mvec &&v, vec_allocator *alloc)#
-
inline ~alloc_raii_wrapper()#
-
alloc_raii_wrapper(const alloc_raii_wrapper&) = delete#
-
inline alloc_raii_wrapper(alloc_raii_wrapper &&o)#
-
alloc_raii_wrapper &operator=(const alloc_raii_wrapper&) = delete#
-
inline alloc_raii_wrapper &operator=(alloc_raii_wrapper &&o)#
-
inline alloc_raii_wrapper &operator=(crvec v)#
-
namespace detail#
Functions
-
inline void update_penalty_weights(const ALMParams &params, real_t Δ, bool first_iter, rvec e, rvec old_e, real_t norm_e, real_t old_norm_e, crvec Σ_old, rvec Σ)#
-
inline void apply_preconditioning(const Problem &problem, Problem &prec_problem, crvec x, real_t &prec_f, vec &prec_g)#
-
inline real_t calc_ψ_ŷ(const Problem &p, crvec x, crvec y, crvec Σ, rvec ŷ)#
Calculate both ψ(x) and the vector ŷ that can later be used to compute ∇ψ.
\[ \psi(x^k) = f(x^k) + \frac{1}{2} \text{dist}_\Sigma^2\left(g(x^k) + \Sigma^{-1}y,\;D\right) \]\[ \hat{y} \]- Parameters
p – [in] Problem description
x – [in] Decision variable \( x \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
ŷ – [out] \( \hat{y} \)
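For reference, since \( \Pi_D \) is the componentwise projection onto the box D, differentiating the squared Σ-distance in the definition of ψ above gives (a derivation sketch consistent with the formulas in this section, not quoted from the implementation) \[ \hat y = \Sigma\left(g(x) + \Sigma^{-1} y - \Pi_D\left(g(x) + \Sigma^{-1} y\right)\right), \] so that \( \nabla \psi(x) = \nabla f(x) + \nabla g(x)\, \hat y \), matching calc_grad_ψ_from_ŷ below.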
-
inline void calc_grad_ψ_from_ŷ(const Problem &p, crvec x, crvec ŷ, rvec grad_ψ, rvec work_n)#
Calculate ∇ψ(x) using ŷ.
- Parameters
p – [in] Problem description
x – [in] Decision variable \( x \)
ŷ – [in] \( \hat{y} \)
grad_ψ – [out] \( \nabla \psi(x) \)
work_n – Dimension n
-
inline real_t calc_ψ_grad_ψ(const Problem &p, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m)#
Calculate both ψ(x) and its gradient ∇ψ(x).
\[ \psi(x^k) = f(x^k) + \frac{1}{2} \text{dist}_\Sigma^2\left(g(x^k) + \Sigma^{-1}y,\;D\right) \]\[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\ \hat{y}(x) \]- Parameters
p – [in] Problem description
x – [in] Decision variable \( x \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
grad_ψ – [out] \( \nabla \psi(x) \)
work_n – Dimension n
work_m – Dimension m
-
inline void calc_grad_ψ(const Problem &p, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m)#
Calculate the gradient ∇ψ(x).
\[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\ \hat{y}(x) \]- Parameters
p – [in] Problem description
x – [in] Decision variable \( x \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
grad_ψ – [out] \( \nabla \psi(x) \)
work_n – Dimension n
work_m – Dimension m
-
inline void calc_err_z(const Problem &p, crvec x̂, crvec y, crvec Σ, rvec err_z)#
Calculate the error between ẑ and g(x).
\[ \hat{z}^k = \Pi_D\left(g(x^k) + \Sigma^{-1}y\right) \]- Parameters
p – [in] Problem description
x̂ – [in] Decision variable \( \hat{x} \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
err_z – [out] \( g(\hat{x}) - \hat{z} \)
-
inline auto projected_gradient_step(const Box &C, real_t γ, crvec x, crvec grad_ψ)#
Projected gradient step.
\[\begin{split} \begin{aligned} \hat{x}^k &= T_{\gamma^k}\left(x^k\right) \\ &= \Pi_C\left(x^k - \gamma^k \nabla \psi(x^k)\right) \\ p^k &= \hat{x}^k - x^k \\ \end{aligned} \end{split}\]- Parameters
C – [in] Set to project onto
γ – [in] Step size
x – [in] Decision variable \( x \)
grad_ψ – [in] \( \nabla \psi(x^k) \)
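A minimal Eigen sketch of this step follows (standalone; lb/ub stand in for the bounds of the box C, which are not listed in this extract).
#include <Eigen/Dense>

// x_hat = Pi_C(x - gamma * grad_psi),  p = x_hat - x
Eigen::VectorXd projected_gradient_step_sketch(const Eigen::VectorXd &x,
                                               const Eigen::VectorXd &grad_psi,
                                               double gamma,
                                               const Eigen::VectorXd &lb,
                                               const Eigen::VectorXd &ub) {
    Eigen::VectorXd x_hat = (x - gamma * grad_psi).cwiseMax(lb).cwiseMin(ub);
    return x_hat - x;   // the step p^k
}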
-
inline void calc_x̂(const Problem &prob, real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p)#
- Parameters
prob – [in] Problem description
γ – [in] Step size
x – [in] Decision variable \( x \)
grad_ψ – [in] \( \nabla \psi(x^k) \)
x̂ – [out] \( \hat{x}^k = T_{\gamma^k}(x^k) \)
p – [out] \( \hat{x}^k - x^k \)
-
inline bool stop_crit_requires_grad_̂ψₖ(PANOCStopCrit crit)#
-
inline real_t calc_error_stop_crit(const Box &C, PANOCStopCrit crit, crvec pₖ, real_t γ, crvec xₖ, crvec x̂ₖ, crvec ŷₖ, crvec grad_ψₖ, crvec grad_̂ψₖ)#
Compute the ε from the stopping criterion, see PANOCStopCrit.
- Parameters
C – [in] Box constraints on x
crit – [in] Which stopping criterion to use
pₖ – [in] Projected gradient step \( \hat x^k - x^k \)
γ – [in] Step size
xₖ – [in] Current iterate
x̂ₖ – [in] Current iterate after projected gradient step
ŷₖ – [in] Candidate Lagrange multipliers
grad_ψₖ – [in] Gradient in \( x^k \)
grad_̂ψₖ – [in] Gradient in \( \hat x^k \)
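A sketch of evaluating the stopping measure for the ApproxKKT criterion and comparing it against the requested tolerance ε (all vectors are assumed to have been computed by the surrounding solver; C is the box of constraints on x):
```cpp
real_t εₖ = detail::calc_error_stop_crit(C, PANOCStopCrit::ApproxKKT,
                                         pₖ, γ, xₖ, x̂ₖ, ŷₖ, grad_ψₖ, grad_̂ψₖ);
bool converged = εₖ <= ε;
```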
-
inline real_t descent_lemma(const Problem &problem, real_t rounding_tolerance, real_t L_max, crvec xₖ, real_t ψₖ, crvec grad_ψₖ, crvec y, crvec Σ, rvec x̂ₖ, rvec pₖ, rvec ŷx̂ₖ, real_t &ψx̂ₖ, real_t &norm_sq_pₖ, real_t &grad_ψₖᵀpₖ, real_t &Lₖ, real_t &γₖ)#
Increase the estimate of the Lipschitz constant of the objective gradient and decrease the step size until the quadratic upper bound (descent lemma) is satisfied:
\[ \psi(x + p) \le \psi(x) + \nabla\psi(x)^\top p + \frac{L}{2} \|p\|^2 \]
- Parameters
problem – [in] Problem description
rounding_tolerance – [in] Tolerance used to ignore rounding errors when the function \( \psi(x) \) is relatively flat or the step size is very small, which could cause \( \psi(x^k) < \psi(\hat x^k) \), which is mathematically impossible but could occur in finite precision floating point arithmetic.
L_max – [in] Maximum allowed Lipschitz constant estimate (prevents infinite loop if function or derivatives are discontinuous, and keeps step size bounded away from zero).
xₖ – [in] Current iterate \( x^k \)
ψₖ – [in] Objective function \( \psi(x^k) \)
grad_ψₖ – [in] Gradient of objective \( \nabla\psi(x^k) \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
x̂ₖ – [out] Projected gradient iterate \( \hat x^k \)
pₖ – [out] Projected gradient step \( p^k \)
ŷx̂ₖ – [out] Intermediate vector \( \hat y(\hat x^k) \)
ψx̂ₖ – [inout] Objective function \( \psi(\hat x^k) \)
norm_sq_pₖ – [inout] Squared norm of the step \( \left\| p^k \right\|^2 \)
grad_ψₖᵀpₖ – [inout] Gradient of objective times step \( \nabla\psi(x^k)^\top p^k\)
Lₖ – [inout] Lipschitz constant estimate \( L_{\nabla\psi}^k \)
γₖ – [inout] Step size \( \gamma^k \)
- Returns
The original step size, before it was reduced by this function.
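A sketch of how this backtracking routine is typically driven: the caller first evaluates the trial step at the current γₖ, then lets descent_lemma increase Lₖ and shrink γₖ until the quadratic upper bound holds. Here prob, xₖ, ψₖ, grad_ψₖ, y, Σ, Lₖ, γₖ and L_max are assumed to come from the surrounding solver loop, and the rounding tolerance is an illustrative value.
```cpp
vec x̂ₖ(prob.n), pₖ(prob.n), ŷx̂ₖ(prob.m);
// Trial step at the current step size γₖ.
detail::calc_x̂(prob, γₖ, xₖ, grad_ψₖ, x̂ₖ, pₖ);
real_t ψx̂ₖ       = detail::calc_ψ_ŷ(prob, x̂ₖ, y, Σ, ŷx̂ₖ);
real_t norm_sq_pₖ = pₖ.squaredNorm();
real_t grad_ψₖᵀpₖ = grad_ψₖ.dot(pₖ);
// Backtrack: on exit, x̂ₖ, pₖ and the three scalars above correspond to the final γₖ.
detail::descent_lemma(prob, /*rounding_tolerance=*/1e-8, L_max, xₖ, ψₖ, grad_ψₖ,
                      y, Σ, x̂ₖ, pₖ, ŷx̂ₖ, ψx̂ₖ, norm_sq_pₖ, grad_ψₖᵀpₖ, Lₖ, γₖ);
```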
-
template<class ParamsT, class DurationT>
inline SolverStatus check_all_stop_conditions(const ParamsT &params, DurationT time_elapsed, unsigned iteration, const AtomicStopSignal &stop_signal, real_t ε, real_t εₖ, unsigned no_progress)#
Check all stop conditions (required tolerance reached, out of time, maximum number of iterations exceeded, interrupted by user, infinite iterate, no progress made).
- Parameters
params – [in] Parameters including `max_iter`, `max_time` and `max_no_progress`
time_elapsed – [in] Time elapsed since the start of the algorithm
iteration – [in] The current iteration number
stop_signal – [in] A stop signal for the user to interrupt the algorithm
ε – [in] Desired primal tolerance
εₖ – [in] Tolerance of the current iterate
no_progress – [in] The number of successive iterations in which no progress was made
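A sketch of the end-of-iteration check in a solver loop. params, start_time, k, stop_signal, ε, εₖ and no_progress are assumed to exist in that loop, and the sketch assumes the function returns SolverStatus::Unknown as long as no stop condition applies (consistent with Unknown being the initial value).
```cpp
auto time_elapsed = std::chrono::steady_clock::now() - start_time;
auto status = detail::check_all_stop_conditions(params, time_elapsed, k,
                                                stop_signal, ε, εₖ, no_progress);
if (status != SolverStatus::Unknown)
    return status;  // e.g. Converged, MaxIter, MaxTime, Interrupted, ...
```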
-
inline void calc_augmented_lagrangian_hessian(const Problem &problem, crvec xₖ, crvec ŷxₖ, crvec y, crvec Σ, rvec g, mat &H, rvec work_n)#
Compute the Hessian matrix of the augmented Lagrangian function.
\[ \nabla^2_{xx} L_\Sigma(x, y) = \Big. \nabla_{xx}^2 L(x, y) \Big|_{\big(x,\, \hat y(x, y)\big)} + \sum_{i\in\mathcal{I}} \Sigma_i\,\nabla g_i(x) \nabla g_i(x)^\top \]
- Parameters
problem – [in] Problem description
xₖ – [in] Current iterate \( x^k \)
ŷxₖ – [in] Intermediate vector \( \hat y(x^k) \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
g – [out] The constraint values \( g(x^k) \)
H – [out] Hessian matrix \( H(x, y) \)
work_n – Dimension n
-
inline void calc_augmented_lagrangian_hessian_prod_fd(const Problem &problem, crvec xₖ, crvec y, crvec Σ, crvec grad_ψ, crvec v, rvec Hv, rvec work_n1, rvec work_n2, rvec work_m)#
Compute the Hessian matrix of the augmented Lagrangian function multiplied by the given vector, using finite differences.
\[ \nabla^2_{xx} L_\Sigma(x, y)\, v \approx \frac{\nabla_x L_\Sigma(x+hv, y) - \nabla_x L_\Sigma(x, y)}{h} \]
- Parameters
problem – [in] Problem description
xₖ – [in] Current iterate \( x^k \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
grad_ψ – [in] Gradient \( \nabla \psi(x^k) \)
v – [in] Vector to multiply with the Hessian
Hv – [out] Hessian-vector product
work_n1 – Dimension n
work_n2 – Dimension n
work_m – Dimension m
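A sketch of forming a Hessian-vector product by finite differences (v is an arbitrary direction of dimension n; grad_ψ is the gradient previously computed at xₖ):
```cpp
vec Hv(prob.n), work_n1(prob.n), work_n2(prob.n), work_m(prob.m);
detail::calc_augmented_lagrangian_hessian_prod_fd(prob, xₖ, y, Σ, grad_ψ, v,
                                                  Hv, work_n1, work_n2, work_m);
```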
-
inline real_t initial_lipschitz_estimate(const Problem &problem, crvec xₖ, crvec y, crvec Σ, real_t ε, real_t δ, real_t L_min, real_t L_max, real_t &ψ, rvec grad_ψ, rvec work_n1, rvec work_n2, rvec work_n3, rvec work_m)#
Estimate the Lipschitz constant of the gradient \( \nabla \psi \) using finite differences.
- Parameters
problem – [in] Problem description
xₖ – [in] Current iterate \( x^k \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
ε – [in] Finite difference step size relative to xₖ
δ – [in] Minimum absolute finite difference step size
L_min – [in] Minimum allowed Lipschitz estimate.
L_max – [in] Maximum allowed Lipschitz estimate.
ψ – [out] \( \psi(x^k) \)
grad_ψ – [out] Gradient \( \nabla \psi(x^k) \)
work_n1 – Dimension n
work_n2 – Dimension n
work_n3 – Dimension n
work_m – Dimension m
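A sketch of the typical one-time call at an initial iterate x₀. The values 1e-6 and 1e-12 for the finite difference step sizes and the bounds L_min and L_max are illustrative, and the snippet assumes the returned real_t is the Lipschitz estimate itself (the return value is not documented above).
```cpp
real_t ψ0;
vec grad_ψ(prob.n), wn1(prob.n), wn2(prob.n), wn3(prob.n), wm(prob.m);
real_t L0 = detail::initial_lipschitz_estimate(prob, x₀, y, Σ,
                                               /*ε=*/1e-6, /*δ=*/1e-12,
                                               L_min, L_max,
                                               ψ0, grad_ψ, wn1, wn2, wn3, wm);
```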
-
inline real_t initial_lipschitz_estimate(const Problem &problem, crvec xₖ, crvec y, crvec Σ, real_t ε, real_t δ, real_t L_min, real_t L_max, rvec grad_ψ, rvec work_n1, rvec work_n2, rvec work_n3, rvec work_m)#
Estimate the Lipschitz constant of the gradient \( \nabla \psi \) using finite differences.
- Parameters
problem – [in] Problem description
xₖ – [in] Current iterate \( x^k \)
y – [in] Lagrange multipliers \( y \)
Σ – [in] Penalty weights \( \Sigma \)
ε – [in] Finite difference step size relative to xₖ
δ – [in] Minimum absolute finite difference step size
L_min – [in] Minimum allowed Lipschitz estimate.
L_max – [in] Maximum allowed Lipschitz estimate.
grad_ψ – [out] Gradient \( \nabla \psi(x^k) \)
work_n1 – Dimension n
work_n2 – Dimension n
work_n3 – Dimension n
work_m – Dimension m
-
template<class ObjFunT, class ObjGradFunT>
real_t initial_lipschitz_estimate(const ObjFunT &ψ, const ObjGradFunT &grad_ψ, crvec xₖ, real_t ε, real_t δ, real_t L_min, real_t L_max, real_t &ψₖ, rvec grad_ψₖ, vec_allocator &alloc)#
Estimate the Lipschitz constant of the gradient \( \nabla \psi \) using finite differences.
- Parameters
ψ – [in] Objective
grad_ψ – [in] Gradient of the objective \( \nabla \psi \)
xₖ – [in] Current iterate \( x^k \)
ε – [in] Finite difference step size relative to xₖ
δ – [in] Minimum absolute finite difference step size
L_min – [in] Minimum allowed Lipschitz estimate.
L_max – [in] Maximum allowed Lipschitz estimate.
ψₖ – [out] \( \psi(x^k) \)
grad_ψₖ – [out] Gradient \( \nabla \psi(x^k) \)
alloc – [in] Allocator providing the temporary workspace vectors
-
inline void calc_x̂(real_t γ, crvec x, const Box &C, crvec grad_ψ, rvec x̂, rvec p)#
- Parameters
γ – [in] Step size
x – [in] Decision variable \( x \)
C – [in] Box constraints \( C \)
grad_ψ – [in] \( \nabla \psi(x^k) \)
x̂ – [out] \( \hat{x}^k = T_{\gamma^k}(x^k) \)
p – [out] \( \hat{x}^k - x^k \)
-
template<class ObjFunT>
real_t descent_lemma(const ObjFunT &ψ, const Box &C, real_t rounding_tolerance, real_t L_max, crvec xₖ, real_t ψₖ, crvec grad_ψₖ, rvec x̂ₖ, rvec pₖ, real_t &ψx̂ₖ, real_t &norm_sq_pₖ, real_t &grad_ψₖᵀpₖ, real_t &Lₖ, real_t &γₖ)#
Increase the estimate of the Lipschitz constant of the objective gradient and decrease the step size until the quadratic upper bound (descent lemma) is satisfied:
\[ \psi(x + p) \le \psi(x) + \nabla\psi(x)^\top p + \frac{L}{2} \|p\|^2 \]
- Parameters
ψ – [in] Objective.
C – [in] Box constraints.
rounding_tolerance – [in] Tolerance used to ignore rounding errors when the function \( \psi(x) \) is relatively flat or the step size is very small, which could cause \( \psi(x^k) < \psi(\hat x^k) \), which is mathematically impossible but could occur in finite precision floating point arithmetic.
L_max – [in] Maximum allowed Lipschitz constant estimate (prevents infinite loop if function or derivatives are discontinuous, and keeps step size bounded away from zero).
xₖ – [in] Current iterate \( x^k \)
ψₖ – [in] Objective function \( \psi(x^k) \)
grad_ψₖ – [in] Gradient of objective \( \nabla\psi(x^k) \)
x̂ₖ – [out] Projected gradient iterate \( \hat x^k \)
pₖ – [out] Projected gradient step \( p^k \)
ψx̂ₖ – [inout] Objective function \( \psi(\hat x^k) \)
norm_sq_pₖ – [inout] Squared norm of the step \( \left\| p^k \right\|^2 \)
grad_ψₖᵀpₖ – [inout] Gradient of objective times step \( \nabla\psi(x^k)^\top p^k\)
Lₖ – [inout] Lipschitz constant estimate \( L_{\nabla\psi}^k \)
γₖ – [inout] Step size \( \gamma^k \)
- Returns
The original step size, before it was reduced by this function.
-
namespace vec_util#
Functions
-
template<class V, class M>
auto norm_squared_weighted(V &&v, M &&Σ)#
Get the Σ norm squared of a given vector, with Σ a diagonal matrix.
- Returns
\( \langle v, \Sigma v \rangle \)
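For illustration, the returned quantity corresponds to the following Eigen expression when Σ is stored as the vector of its diagonal entries (a sketch of what the value means, not of the helper's actual implementation):
```cpp
real_t sq = v.dot(Σ.asDiagonal() * v);  // ⟨v, Σ v⟩
```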
-
using real_t = double#