Nonconvex constrained optimization
Loading...
Searching...
No Matches
type-erased-problem.hpp
Go to the documentation of this file.
1#pragma once
2
4#include <alpaqa/export.hpp>
9#include <guanaqo/not-implemented.hpp>
10#include <guanaqo/required-method.hpp>
11#include <guanaqo/type-erasure.hpp>
12#include <type_traits>
13#include <utility>
14
15#ifdef _MSC_VER
16#define ALPAQA_SUPPRESS_NONEXPORTED_BASE_WARNING_START \
17 __pragma(warning(push)) __pragma(warning(disable : 4275))
18#define ALPAQA_SUPPRESS_NONEXPORTED_BASE_WARNING_END __pragma(warning(pop))
19#else
20#define ALPAQA_SUPPRESS_NONEXPORTED_BASE_WARNING_START
21#define ALPAQA_SUPPRESS_NONEXPORTED_BASE_WARNING_END
22#endif
23
24namespace alpaqa {
25
26/// Raised when calling problem functions that are not implemented.
27using guanaqo::not_implemented_error;
28
29/// Struct containing function pointers to all problem functions (like the
30/// objective and constraint functions, with their derivatives, and more).
31/// Some default implementations are available.
32/// Internal struct, it is used by @ref TypeErasedProblem.
34template <Config Conf>
35struct ALPAQA_EXPORT ProblemVTable : guanaqo::BasicVTable {
38
39 template <class F>
40 using optional_function_t = guanaqo::optional_function_t<F, ProblemVTable>;
41 template <class F>
42 using required_function_t = guanaqo::required_function_t<F>;
43
44 // clang-format off
45
46 // Required
47 required_function_t<void(crvec z, rvec e) const>
49 required_function_t<void(rvec y, real_t M) const>
51 required_function_t<real_t(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const>
55 required_function_t<void(crvec x, rvec grad_fx) const>
57 required_function_t<void(crvec x, rvec gx) const>
59 required_function_t<void(crvec x, crvec y, rvec grad_gxy) const>
61 optional_function_t<index_t(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const>
63 optional_function_t<void(real_t γ, crvec x, rvec J_diag) const>
67
68 // Second order
69 optional_function_t<void(crvec x, rvec J_values) const>
71 optional_function_t<Sparsity() const>
73 optional_function_t<void(crvec x, index_t i, rvec grad_gi) const>
75 optional_function_t<void(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const>
77 optional_function_t<void(crvec x, crvec y, real_t scale, rvec H_values) const>
79 optional_function_t<Sparsity() const>
81 optional_function_t<void(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const>
83 optional_function_t<void(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const>
85 optional_function_t<Sparsity() const>
87
88 // Combined evaluations
89 optional_function_t<real_t(crvec x, rvec grad_fx) const>
93 optional_function_t<void(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const>
95
96 // Lagrangian and augmented lagrangian evaluations
97 optional_function_t<void(crvec x, crvec y, rvec grad_L, rvec work_n) const>
99 optional_function_t<real_t(crvec x, crvec y, crvec Σ, rvec ŷ) const>
101 optional_function_t<void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const>
103 optional_function_t<real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const>
105
106 // Constraint sets
107 optional_function_t<const Box &() const>
109 optional_function_t<const Box &() const>
111
112 // Check
113 optional_function_t<void() const>
115 optional_function_t<std::string() const>
117
118 // clang-format on
119
// Given g(x) stored in g_ŷ, overwrite it in place with ŷ = Σ d, where
// ζ = g(x) + Σ⁻¹y and d = ζ − Π(ζ, D), and return the inner product dᵀŷ.
// Shared helper used by the default augmented-Lagrangian evaluations below.
// Note that g_ŷ serves as both input (g(x)) and output (ŷ).
120 static real_t calc_ŷ_dᵀŷ(const void *self, rvec g_ŷ, crvec y, crvec Σ,
121 const ProblemVTable &vtable) {
// Fast path: Σ holds a single scalar penalty factor applied to all
// constraints (only compiled when the expression Σ(0) is valid).
122 if constexpr (requires { Σ(0); })
123 if (Σ.size() == 1) {
124 // ζ = g(x) + Σ⁻¹y
125 g_ŷ += (1 / Σ(0)) * y;
126 // d = ζ - Π(ζ, D)
127 vtable.eval_projecting_difference_constraints(self, g_ŷ, g_ŷ);
128 // dᵀŷ, ŷ = Σ d
129 real_t dᵀŷ = Σ(0) * g_ŷ.dot(g_ŷ);
130 g_ŷ *= Σ(0);
131 return dᵀŷ;
132 }
// General path: Σ is a vector of per-constraint penalty factors, so its
// size must match the number of multipliers y.
133 if (Σ.size() != y.size())
134 throw std::logic_error("Penalty/multiplier size mismatch");
135 // ζ = g(x) + Σ⁻¹y
136 g_ŷ += y.cwiseQuotient(Σ);
137 // d = ζ - Π(ζ, D)
138 vtable.eval_projecting_difference_constraints(self, g_ŷ, g_ŷ);
139 // dᵀŷ, ŷ = Σ d
140 real_t dᵀŷ = g_ŷ.dot(Σ.cwiseProduct(g_ŷ));
141 g_ŷ = Σ.cwiseProduct(g_ŷ);
142 return dᵀŷ;
143 }
145 throw not_implemented_error("eval_nonsmooth_objective");
146 }
148 rindexvec, const ProblemVTable &) {
149 throw not_implemented_error("eval_inactive_indices_res_lna");
150 }
152 const ProblemVTable &) {
153 throw not_implemented_error("eval_prox_jacobian_diag");
154 }
// Default: with no general constraints (m == 0) the Jacobian is empty and
// this is a harmless no-op; otherwise the operation is unimplemented.
155 static void default_eval_constraints_jacobian(const void *, crvec, rvec,
156 const ProblemVTable &vtable) {
157 if (vtable.m != 0)
158 throw not_implemented_error("eval_constraints_jacobian");
159 }
// Default sparsity pattern: assume a dense m×n constraints Jacobian.
160 static Sparsity default_get_constraints_jacobian_sparsity(const void *,
161 const ProblemVTable &vtable) {
162 return sparsity::Dense{vtable.m, vtable.n};
163 }
// No sensible default exists for the gradient of a single constraint
// g_i(x), so the operation is always reported as unimplemented.
164 static void default_eval_grad_gi(const void *, crvec, index_t, rvec, const ProblemVTable &) {
165 throw not_implemented_error("eval_grad_gi");
166 }
168 rvec, const ProblemVTable &) {
169 throw not_implemented_error("eval_lagrangian_hessian_product");
170 }
172 const ProblemVTable &) {
173 throw not_implemented_error("eval_lagrangian_hessian");
174 }
// Default sparsity pattern: dense n×n Lagrangian Hessian, upper triangle
// only (the matrix is symmetric).
175 static Sparsity default_get_lagrangian_hessian_sparsity(const void *,
176 const ProblemVTable &vtable) {
177 return sparsity::Dense{vtable.n, vtable.n, sparsity::Symmetry::Upper};
178 }
180 crvec y, crvec, real_t scale,
181 crvec v, rvec Hv,
182 const ProblemVTable &vtable) {
183 if (vtable.m == 0 && vtable.eval_lagrangian_hessian_product !=
185 return vtable.eval_lagrangian_hessian_product(self, x, y, scale, v, Hv, vtable);
186 throw not_implemented_error("eval_augmented_lagrangian_hessian_product");
187 }
// For unconstrained problems (m == 0) the augmented-Lagrangian Hessian
// coincides with the plain Lagrangian Hessian, so forward to the latter
// when the problem implements it (i.e. its vtable entry is not the default
// shim); otherwise the operation is unimplemented.
188 static void default_eval_augmented_lagrangian_hessian(const void *self, crvec x, crvec y, crvec,
189 real_t scale, rvec H_values,
190 const ProblemVTable &vtable) {
191 if (vtable.m == 0 && vtable.eval_lagrangian_hessian != default_eval_lagrangian_hessian)
192 return vtable.eval_lagrangian_hessian(self, x, y, scale, H_values, vtable);
193 throw not_implemented_error("eval_augmented_lagrangian_hessian");
194 }
196 const ProblemVTable &vtable) {
197 if (vtable.m == 0 &&
199 return vtable.get_lagrangian_hessian_sparsity(self, vtable);
200 return sparsity::Dense{vtable.n, vtable.n, sparsity::Symmetry::Upper};
201 }
202 /** @implementation{ProblemVTable<Conf>::default_eval_objective_and_gradient} */
203 /* [ProblemVTable<Conf>::default_eval_objective_and_gradient] */
// Default combined evaluation: one gradient call followed by one objective
// call (problems may provide a fused implementation instead).
204 static real_t default_eval_objective_and_gradient(const void *self, crvec x, rvec grad_fx,
205 const ProblemVTable &vtable) {
206 vtable.eval_objective_gradient(self, x, grad_fx);
207 return vtable.eval_objective(self, x);
208 }
209 /* [ProblemVTable<Conf>::default_eval_objective_and_gradient] */
210
211 /** @implementation{ProblemVTable<Conf>::default_eval_objective_and_constraints} */
212 /* [ProblemVTable<Conf>::default_eval_objective_and_constraints] */
214 const ProblemVTable &vtable) {
215 vtable.eval_constraints(self, x, g);
216 return vtable.eval_objective(self, x);
217 }
218 /* [ProblemVTable<Conf>::default_eval_objective_and_constraints] */
219
220 /** @implementation{ProblemVTable<Conf>::default_eval_objective_gradient_and_constraints_gradient_product} */
221 /* [ProblemVTable<Conf>::default_eval_objective_gradient_and_constraints_gradient_product] */
223 const void *self, crvec x, crvec y, rvec grad_f, rvec grad_gxy,
224 const ProblemVTable &vtable) {
225 vtable.eval_objective_gradient(self, x, grad_f);
226 vtable.eval_constraints_gradient_product(self, x, y, grad_gxy);
227 }
228 /* [ProblemVTable<Conf>::default_eval_objective_gradient_and_constraints_gradient_product] */
229
230 /** @implementation{ProblemVTable<Conf>::default_eval_lagrangian_gradient} */
231 /* [ProblemVTable<Conf>::default_eval_lagrangian_gradient] */
// Default ∇L(x,y) = ∇f(x) + ∇g(x) y. With no multipliers this reduces to
// ∇f(x); otherwise ∇g(x) y is computed into work_n and accumulated.
232 static void default_eval_lagrangian_gradient(const void *self, crvec x, crvec y, rvec grad_L,
233 rvec work_n, const ProblemVTable &vtable) {
234 if (y.size() == 0) /* [[unlikely]] */
235 return vtable.eval_objective_gradient(self, x, grad_L);
236 vtable.eval_objective_gradient_and_constraints_gradient_product(self, x, y, grad_L, work_n,
237 vtable);
238 grad_L += work_n;
239 }
240 /* [ProblemVTable<Conf>::default_eval_lagrangian_gradient] */
241
242 /** @implementation{ProblemVTable<Conf>::default_eval_augmented_lagrangian} */
243 /* [ProblemVTable<Conf>::default_eval_augmented_lagrangian] */
// Default augmented Lagrangian ψ(x) = f(x) + ½ dᵀŷ, also producing ŷ so a
// subsequent gradient evaluation can reuse it. With no multipliers, ψ(x)
// reduces to the plain objective f(x).
244 static real_t default_eval_augmented_lagrangian(const void *self, crvec x, crvec y, crvec Σ,
245 rvec ŷ, const ProblemVTable &vtable) {
246 if (y.size() == 0) /* [[unlikely]] */
247 return vtable.eval_objective(self, x);
248
// ŷ first receives g(x); calc_ŷ_dᵀŷ then turns it into ŷ in place.
249 auto f = vtable.eval_objective_and_constraints(self, x, ŷ, vtable);
250 auto dᵀŷ = calc_ŷ_dᵀŷ(self, ŷ, y, Σ, vtable);
251 // ψ(x) = f(x) + ½ dᵀŷ
252 auto ψ = f + real_t(0.5) * dᵀŷ;
253 return ψ;
254 }
255 /* [ProblemVTable<Conf>::default_eval_augmented_lagrangian] */
256
257 /** @implementation{ProblemVTable<Conf>::default_eval_augmented_lagrangian_gradient} */
258 /* [ProblemVTable<Conf>::default_eval_augmented_lagrangian_gradient] */
// Default ∇ψ(x): compute ŷ into work_m (the ψ value itself is discarded),
// then evaluate ∇ψ(x) = ∇f(x) + ∇g(x) ŷ as a Lagrangian gradient with ŷ
// playing the role of the multipliers. With no multipliers, ∇ψ(x) reduces
// to ∇f(x).
259 static void default_eval_augmented_lagrangian_gradient(const void *self, crvec x, crvec y,
260 crvec Σ, rvec grad_ψ, rvec work_n,
261 rvec work_m,
262 const ProblemVTable &vtable) {
263 if (y.size() == 0) /* [[unlikely]] */ {
264 vtable.eval_objective_gradient(self, x, grad_ψ);
265 } else {
266 vtable.eval_constraints(self, x, work_m);
267 (void)calc_ŷ_dᵀŷ(self, work_m, y, Σ, vtable);
268 vtable.eval_lagrangian_gradient(self, x, work_m, grad_ψ, work_n, vtable);
269 }
270 }
271 /* [ProblemVTable<Conf>::default_eval_augmented_lagrangian_gradient] */
272
273 /** @implementation{ProblemVTable<Conf>::default_eval_augmented_lagrangian_and_gradient} */
274 /* [ProblemVTable<Conf>::default_eval_augmented_lagrangian_and_gradient] */
276 crvec Σ, rvec grad_ψ, rvec work_n,
277 rvec work_m,
278 const ProblemVTable &vtable) {
279 if (y.size() == 0) /* [[unlikely]] */
280 return vtable.eval_objective_and_gradient(self, x, grad_ψ, vtable);
281
282 auto &ŷ = work_m;
283 // ψ(x) = f(x) + ½ dᵀŷ
284 auto f = vtable.eval_objective_and_constraints(self, x, ŷ, vtable);
285 auto dᵀŷ = calc_ŷ_dᵀŷ(self, ŷ, y, Σ, vtable);
286 auto ψ = f + real_t(0.5) * dᵀŷ;
287 // ∇ψ(x) = ∇f(x) + ∇g(x) ŷ
288 vtable.eval_lagrangian_gradient(self, x, ŷ, grad_ψ, work_n, vtable);
289 return ψ;
290 }
291 /* [ProblemVTable<Conf>::default_eval_augmented_lagrangian_and_gradient] */
// No default box constraint sets exist: problems that do not expose them
// report these accessors as unimplemented.
292 static const Box &default_get_variable_bounds(const void *, const ProblemVTable &) {
293 throw not_implemented_error("get_variable_bounds");
294 }
295 static const Box &default_get_general_bounds(const void *, const ProblemVTable &) {
296 throw not_implemented_error("get_general_bounds");
297 }
// Default check: accept the problem without any validation.
298 static void default_check(const void *, const ProblemVTable &) {}
// Default descriptive name when the problem does not supply one.
299 static std::string default_get_name(const void *, const ProblemVTable &) {
300 return "unknown problem";
301 }
302
304
305 template <class P>
306 ProblemVTable(std::in_place_t, P &p) : guanaqo::BasicVTable{std::in_place, p} {
307 auto &vtable = *this;
308
309 // Initialize all methods
310
311 // Required
312 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_projecting_difference_constraints);
313 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_projection_multipliers);
314 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_proximal_gradient_step);
315 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_objective);
316 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_objective_gradient);
317 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_constraints);
318 GUANAQO_TE_REQUIRED_METHOD(vtable, P, eval_constraints_gradient_product);
319 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_inactive_indices_res_lna, p);
320 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_prox_jacobian_diag, p);
321 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_nonsmooth_objective, p);
322 // Second order
323 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_constraints_jacobian, p);
324 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, get_constraints_jacobian_sparsity, p);
325 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_grad_gi, p);
326 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_lagrangian_hessian_product, p);
327 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_lagrangian_hessian, p);
328 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, get_lagrangian_hessian_sparsity, p);
329 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_augmented_lagrangian_hessian_product, p);
330 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_augmented_lagrangian_hessian, p);
331 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, get_augmented_lagrangian_hessian_sparsity, p);
332 // Combined evaluations
333 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_objective_and_gradient, p);
334 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_objective_and_constraints, p);
335 GUANAQO_TE_OPTIONAL_METHOD(vtable, P,
337 // Lagrangian and augmented lagrangian evaluations
338 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_lagrangian_gradient, p);
339 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_augmented_lagrangian, p);
340 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_augmented_lagrangian_gradient, p);
341 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, eval_augmented_lagrangian_and_gradient, p);
342 // Constraint set
343 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, get_variable_bounds, p);
344 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, get_general_bounds, p);
345 // Check
346 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, check, p);
347 GUANAQO_TE_OPTIONAL_METHOD(vtable, P, get_name, p);
348
349 // Dimensions
350 vtable.n = p.get_num_variables();
351 vtable.m = p.get_num_constraints();
352 }
353 ProblemVTable() = default;
354};
356
357#ifndef _WIN32
362#endif
363
364/// @addtogroup grp_Problems
365/// @{
366
367/// The main polymorphic minimization problem interface.
368///
369/// This class wraps the actual problem implementation class, filling in the
370/// missing member functions with sensible defaults, and providing a uniform
371/// interface that is used by the solvers.
372///
373/// The problem implementations do not inherit from an abstract base class.
374/// Instead, [structural typing](https://en.wikipedia.org/wiki/Structural_type_system)
375/// is used. The @ref ProblemVTable constructor uses reflection to discover
376/// which member functions are provided by the problem implementation. See
377/// @ref page-problem-formulations for more information, and
378/// @ref C++/CustomCppProblem/main.cpp for an example.
379template <Config Conf = DefaultConfig, class Allocator = std::allocator<std::byte>>
380class TypeErasedProblem : public guanaqo::TypeErased<ProblemVTable<Conf>, Allocator> {
381 public:
385 using allocator_type = Allocator;
386 using TypeErased = guanaqo::TypeErased<VTable, allocator_type>;
387 using TypeErased::TypeErased;
388
389 protected:
390 using TypeErased::call;
391 using TypeErased::self;
392 using TypeErased::vtable;
393
394 public:
395 template <class T, class... Args>
396 static TypeErasedProblem make(Args &&...args) {
397 return TypeErased::template make<TypeErasedProblem, T>(std::forward<Args>(args)...);
398 }
399
400 /// @name Problem dimensions
401 /// @{
402
403 /// **[Required]**
404 /// Number of decision variables.
405 [[nodiscard]] length_t get_num_variables() const;
406 /// **[Required]**
407 /// Number of constraints.
408 [[nodiscard]] length_t get_num_constraints() const;
409
410 /// @}
411
412 /// @name Required cost and constraint functions
413 /// @{
414
415 /// **[Required]**
416 /// Function that evaluates the cost, @f$ f(x) @f$
417 /// @param [in] x
418 /// Decision variable @f$ x \in \R^n @f$
419 [[nodiscard]] real_t eval_objective(crvec x) const;
420 /// **[Required]**
421 /// Function that evaluates the gradient of the cost, @f$ \nabla f(x) @f$
422 /// @param [in] x
423 /// Decision variable @f$ x \in \R^n @f$
424 /// @param [out] grad_fx
425 /// Gradient of cost function @f$ \nabla f(x) \in \R^n @f$
426 void eval_objective_gradient(crvec x, rvec grad_fx) const;
427 /// **[Required]**
428 /// Function that evaluates the constraints, @f$ g(x) @f$
429 /// @param [in] x
430 /// Decision variable @f$ x \in \R^n @f$
431 /// @param [out] gx
432 /// Value of the constraints @f$ g(x) \in \R^m @f$
433 void eval_constraints(crvec x, rvec gx) const;
434 /// **[Required]**
435 /// Function that evaluates the gradient of the constraints times a vector,
436 /// @f$ \nabla g(x)\,y = \tp{\jac_g(x)}y @f$
437 /// @param [in] x
438 /// Decision variable @f$ x \in \R^n @f$
439 /// @param [in] y
440 /// Vector @f$ y \in \R^m @f$ to multiply the gradient by
441 /// @param [out] grad_gxy
442 /// Gradient of the constraints
443 /// @f$ \nabla g(x)\,y \in \R^n @f$
445
446 /// @}
447
448 /// @name Projections onto constraint sets and proximal mappings
449 /// @{
450
451 /// **[Required]**
452 /// Function that evaluates the difference between the given point @f$ z @f$
453 /// and its projection onto the constraint set @f$ D @f$.
454 /// @param [in] z
455 /// Slack variable, @f$ z \in \R^m @f$
456 /// @param [out] e
457 /// The difference relative to its projection,
458 /// @f$ e = z - \Pi_D(z) \in \R^m @f$
459 /// @note @p z and @p e can refer to the same vector.
461 /// **[Required]**
462 /// Function that projects the Lagrange multipliers for ALM.
463 /// @param [inout] y
464 /// Multipliers, @f$ y \leftarrow \Pi_Y(y) \in \R^m @f$
465 /// @param [in] M
466 /// The radius/size of the set @f$ Y @f$.
467 /// See @ref ALMParams::max_multiplier.
469 /// **[Required]**
470 /// Function that computes a proximal gradient step.
471 /// @param [in] γ
472 /// Step size, @f$ \gamma \in \R_{>0} @f$
473 /// @param [in] x
474 /// Decision variable @f$ x \in \R^n @f$
475 /// @param [in] grad_ψ
476 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
477 /// @param [out] x̂
478 /// Next proximal gradient iterate, @f$ \hat x = T_\gamma(x) =
479 /// \prox_{\gamma h}(x - \gamma\nabla\psi(x)) \in \R^n @f$
480 /// @param [out] p
481 /// The proximal gradient step,
482 /// @f$ p = \hat x - x \in \R^n @f$
483 /// @return The nonsmooth function evaluated at x̂,
484 /// @f$ h(\hat x) @f$.
485 /// @note The vector @f$ p @f$ is often used in stopping criteria, so its
486 /// numerical accuracy is more important than that of @f$ \hat x @f$.
488 /// **[Optional]**
489 /// Function that computes the inactive indices @f$ \mathcal J(x) @f$ for
490 /// the evaluation of the linear Newton approximation of the residual, as in
491 /// @cite pas2022alpaqa.
492 /// @param [in] γ
493 /// Step size, @f$ \gamma \in \R_{>0} @f$
494 /// @param [in] x
495 /// Decision variable @f$ x \in \R^n @f$
496 /// @param [in] grad_ψ
497 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
498 /// @param [out] J
499 /// The indices of the components of @f$ x @f$ that are in the
500 /// index set @f$ \mathcal J(x) @f$. In ascending order, at most n.
501 /// @return The number of inactive constraints, @f$ \# \mathcal J(x) @f$.
502 ///
503 /// For example, in the case of box constraints, we have
504 /// @f[ \mathcal J(x) \defeq \defset{i \in \N_{[0, n-1]}}{\underline x_i
505 /// \lt x_i - \gamma\nabla_{\!x_i}\psi(x) \lt \overline x_i}. @f]
507 rindexvec J) const;
508 /// **[Optional]**
509 /// Function that computes the diagonal Jacobian of the proximal mapping of
510 /// @f$ h(x) @f$.
511 /// @param [in] γ
512 /// Step size, @f$ \gamma \in \R_{>0} @f$
513 /// @param [in] x
514 /// Decision variable @f$ x \in \R^n @f$
515 /// @param [out] J_diag
516 /// The diagonal elements of the Jacobian of the prox of the
517 /// nonsmooth objective @f$ h(x) @f$.
518 void eval_prox_jacobian_diag(real_t γ, crvec x, rvec J_diag) const;
519 /// **[Optional]**
520 /// Function that evaluates the non-smooth term of the cost @f$ h(x) @f$.
521 /// @param [in] x
522 /// Decision variable @f$ x \in \R^n @f$
523 /// @return @f$ h(x) @f$
524 [[nodiscard]] real_t eval_nonsmooth_objective(crvec x) const;
525
526 /// @}
527
528 /// @name Constraint sets
529 /// @{
530
531 /// **[Optional]**
532 /// Get the rectangular constraint set of the decision variables,
533 /// @f$ x \in C @f$.
534 [[nodiscard]] const Box &get_variable_bounds() const;
535 /// **[Optional]**
536 /// Get the rectangular constraint set of the general constraint function,
537 /// @f$ g(x) \in D @f$.
538 [[nodiscard]] const Box &get_general_bounds() const;
539
540 /// @}
541
542 /// @name Functions for second-order solvers
543 /// @{
544
545 /// **[Optional]**
546 /// Function that evaluates the nonzero values of the Jacobian matrix of the
547 /// constraints, @f$ \jac_g(x) @f$
548 /// @param [in] x
549 /// Decision variable @f$ x \in \R^n @f$
550 /// @param [out] J_values
551 /// Nonzero values of the Jacobian
552 /// @f$ \jac_g(x) \in \R^{m\times n} @f$
553 ///
554 /// Required for second-order solvers only.
555 void eval_constraints_jacobian(crvec x, rvec J_values) const;
556 /// **[Optional]**
557 /// Function that returns (a view of) the sparsity pattern of the Jacobian
558 /// of the constraints.
559 ///
560 /// Required for second-order solvers only.
561 [[nodiscard]] Sparsity get_constraints_jacobian_sparsity() const;
562 /// **[Optional]**
563 /// Function that evaluates the gradient of one specific constraint,
564 /// @f$ \nabla g_i(x) @f$
565 /// @param [in] x
566 /// Decision variable @f$ x \in \R^n @f$
567 /// @param [in] i
568 /// Which constraint @f$ 0 \le i \lt m @f$
569 /// @param [out] grad_gi
570 /// Gradient of the constraint
571 /// @f$ \nabla g_i(x) \in \R^n @f$
572 ///
573 /// Required for second-order solvers only.
574 void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const;
575 /// **[Optional]**
576 /// Function that evaluates the Hessian of the Lagrangian multiplied by a
577 /// vector,
578 /// @f$ \nabla_{xx}^2L(x, y)\,v @f$
579 /// @param [in] x
580 /// Decision variable @f$ x \in \R^n @f$
581 /// @param [in] y
582 /// Lagrange multipliers @f$ y \in \R^m @f$
583 /// @param [in] scale
584 /// Scale factor for the cost function.
585 /// @param [in] v
586 /// Vector to multiply by @f$ v \in \R^n @f$
587 /// @param [out] Hv
588 /// Hessian-vector product
589 /// @f$ \nabla_{xx}^2 L(x, y)\,v \in \R^{n} @f$
590 ///
591 /// Required for second-order solvers only.
593 /// **[Optional]**
594 /// Function that evaluates the nonzero values of the Hessian of the
595 /// Lagrangian, @f$ \nabla_{xx}^2L(x, y) @f$
596 /// @param [in] x
597 /// Decision variable @f$ x \in \R^n @f$
598 /// @param [in] y
599 /// Lagrange multipliers @f$ y \in \R^m @f$
600 /// @param [in] scale
601 /// Scale factor for the cost function.
602 /// @param [out] H_values
603 /// Nonzero values of the Hessian
604 /// @f$ \nabla_{xx}^2 L(x, y) \in \R^{n\times n} @f$.
605 ///
606 /// Required for second-order solvers only.
607 void eval_lagrangian_hessian(crvec x, crvec y, real_t scale, rvec H_values) const;
608 /// **[Optional]**
609 /// Function that returns (a view of) the sparsity pattern of the Hessian of
610 /// the Lagrangian.
611 ///
612 /// Required for second-order solvers only.
613 [[nodiscard]] Sparsity get_lagrangian_hessian_sparsity() const;
614 /// **[Optional]**
615 /// Function that evaluates the Hessian of the augmented Lagrangian
616 /// multiplied by a vector,
617 /// @f$ \nabla_{xx}^2L_\Sigma(x, y)\,v @f$
618 /// @param [in] x
619 /// Decision variable @f$ x \in \R^n @f$
620 /// @param [in] y
621 /// Lagrange multipliers @f$ y \in \R^m @f$
622 /// @param [in] Σ
623 /// Penalty weights @f$ \Sigma @f$
624 /// @param [in] scale
625 /// Scale factor for the cost function.
626 /// @param [in] v
627 /// Vector to multiply by @f$ v \in \R^n @f$
628 /// @param [out] Hv
629 /// Hessian-vector product
630 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y)\,v \in \R^{n} @f$
631 ///
632 /// Required for second-order solvers only.
634 rvec Hv) const;
635 /// **[Optional]**
636 /// Function that evaluates the nonzero values of the Hessian of the
637 /// augmented Lagrangian, @f$ \nabla_{xx}^2L_\Sigma(x, y) @f$
638 /// @param [in] x
639 /// Decision variable @f$ x \in \R^n @f$
640 /// @param [in] y
641 /// Lagrange multipliers @f$ y \in \R^m @f$
642 /// @param [in] Σ
643 /// Penalty weights @f$ \Sigma @f$
644 /// @param [in] scale
645 /// Scale factor for the cost function.
646 /// @param [out] H_values
647 /// Nonzero values of the Hessian
648 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y) \in \R^{n\times n} @f$
649 ///
650 /// Required for second-order solvers only.
652 rvec H_values) const;
653 /// **[Optional]**
654 /// Function that returns (a view of) the sparsity pattern of the Hessian of
655 /// the augmented Lagrangian.
656 ///
657 /// Required for second-order solvers only.
658 [[nodiscard]] Sparsity get_augmented_lagrangian_hessian_sparsity() const;
659
660 /// @}
661
662 /// @name Combined evaluations
663 /// @{
664
665 /// **[Optional]**
666 /// Evaluate both @f$ f(x) @f$ and its gradient, @f$ \nabla f(x) @f$.
667 /// @default_impl ProblemVTable::default_eval_objective_and_gradient
669 /// **[Optional]**
670 /// Evaluate both @f$ f(x) @f$ and @f$ g(x) @f$.
671 /// @default_impl ProblemVTable::default_eval_objective_and_constraints
673 /// **[Optional]**
674 /// Evaluate both @f$ \nabla f(x) @f$ and @f$ \nabla g(x)\,y @f$.
675 /// @default_impl ProblemVTable::default_eval_objective_gradient_and_constraints_gradient_product
677 rvec grad_gxy) const;
678 /// **[Optional]**
679 /// Evaluate the gradient of the Lagrangian
680 /// @f$ \nabla_x L(x, y) = \nabla f(x) + \nabla g(x)\,y @f$
681 /// @default_impl ProblemVTable::default_eval_lagrangian_gradient
682 void eval_lagrangian_gradient(crvec x, crvec y, rvec grad_L, rvec work_n) const;
683
684 /// @}
685
686 /// @name Augmented Lagrangian
687 /// @{
688
689 /// **[Optional]**
690 /// Calculate both ψ(x) and the vector ŷ that can later be used to compute
691 /// ∇ψ.
692 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
693 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
694 /// @f[ \hat y = \Sigma\, \left(g(x) + \Sigma^{-1}y - \Pi_D\left(g(x)
695 /// + \Sigma^{-1}y\right)\right) @f]
696 /// @default_impl ProblemVTable::default_eval_augmented_lagrangian
697 [[nodiscard]] real_t
698 eval_augmented_lagrangian(crvec x, ///< [in] Decision variable @f$ x @f$
699 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
700 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
701 rvec ŷ ///< [out] @f$ \hat y @f$
702 ) const;
703 /// **[Optional]**
704 /// Calculate the gradient ∇ψ(x).
705 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
706 /// @default_impl ProblemVTable::default_eval_augmented_lagrangian_gradient
707 void eval_augmented_lagrangian_gradient(crvec x, ///< [in] Decision variable @f$ x @f$
708 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
709 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
710 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
711 rvec work_n, ///< Dimension @f$ n @f$
712 rvec work_m ///< Dimension @f$ m @f$
713 ) const;
714 /// **[Optional]**
715 /// Calculate both ψ(x) and its gradient ∇ψ(x).
716 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
717 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
718 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
719 /// @default_impl ProblemVTable::default_eval_augmented_lagrangian_and_gradient
720 [[nodiscard]] real_t
721 eval_augmented_lagrangian_and_gradient(crvec x, ///< [in] Decision variable @f$ x @f$
722 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
723 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
724 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
725 rvec work_n, ///< Dimension @f$ n @f$
726 rvec work_m ///< Dimension @f$ m @f$
727 ) const;
728
729 /// @}
730
731 /// @name Checks
732 /// @{
733
734 /// **[Optional]**
735 /// Check that the problem formulation is well-defined, the dimensions match,
736 /// etc. Throws an exception if this is not the case.
737 void check() const;
738
739 /// @}
740
741 /// @name Metadata
742 /// @{
743
744 /// **[Optional]**
745 /// Get a descriptive name for the problem.
746 [[nodiscard]] std::string get_name() const;
747
748 /// @}
749
750 /// @name Querying specialized implementations
751 /// @{
752
753 /// Returns true if the problem provides an implementation of
754 /// @ref eval_inactive_indices_res_lna.
755 [[nodiscard]] bool provides_eval_inactive_indices_res_lna() const {
756 return vtable.eval_inactive_indices_res_lna != vtable.default_eval_inactive_indices_res_lna;
757 }
758 /// Returns true if the problem provides an implementation of
759 /// @ref eval_prox_jacobian_diag.
760 [[nodiscard]] bool provides_eval_prox_jacobian_diag() const {
761 return vtable.eval_prox_jacobian_diag != vtable.default_eval_prox_jacobian_diag;
762 }
763 /// Returns true if the problem provides an implementation of
764 /// @ref eval_nonsmooth_objective.
765 [[nodiscard]] bool provides_eval_nonsmooth_objective() const {
766 return vtable.eval_nonsmooth_objective != vtable.default_eval_nonsmooth_objective;
767 }
768 /// Returns true if the problem provides an implementation of
769 /// @ref eval_constraints_jacobian.
770 [[nodiscard]] bool provides_eval_constraints_jacobian() const {
771 return vtable.eval_constraints_jacobian != vtable.default_eval_constraints_jacobian;
772 }
773 /// Returns true if the problem provides an implementation of
774 /// @ref get_constraints_jacobian_sparsity.
776 return vtable.get_constraints_jacobian_sparsity !=
777 vtable.default_get_constraints_jacobian_sparsity;
778 }
779 /// Returns true if the problem provides an implementation of
780 /// @ref eval_grad_gi.
781 [[nodiscard]] bool provides_eval_grad_gi() const {
782 return vtable.eval_grad_gi != vtable.default_eval_grad_gi;
783 }
784 /// Returns true if the problem provides an implementation of
785 /// @ref eval_lagrangian_hessian_product.
786 [[nodiscard]] bool provides_eval_lagrangian_hessian_product() const {
787 return vtable.eval_lagrangian_hessian_product !=
788 vtable.default_eval_lagrangian_hessian_product;
789 }
790 /// Returns true if the problem provides an implementation of
791 /// @ref eval_lagrangian_hessian.
792 [[nodiscard]] bool provides_eval_lagrangian_hessian() const {
793 return vtable.eval_lagrangian_hessian != vtable.default_eval_lagrangian_hessian;
794 }
795 /// Returns true if the problem provides an implementation of
796 /// @ref get_lagrangian_hessian_sparsity.
797 [[nodiscard]] bool provides_get_lagrangian_hessian_sparsity() const {
798 return vtable.get_lagrangian_hessian_sparsity !=
799 vtable.default_get_lagrangian_hessian_sparsity;
800 }
801 /// Returns true if the problem provides an implementation of
802 /// @ref eval_augmented_lagrangian_hessian_product.
804 return vtable.eval_augmented_lagrangian_hessian_product !=
805 vtable.default_eval_augmented_lagrangian_hessian_product;
806 }
807 /// Returns true if the problem provides an implementation of
808 /// @ref eval_augmented_lagrangian_hessian.
810 return vtable.eval_augmented_lagrangian_hessian !=
811 vtable.default_eval_augmented_lagrangian_hessian;
812 }
813 /// Returns true if the problem provides an implementation of
814 /// @ref get_augmented_lagrangian_hessian_sparsity.
816 return vtable.get_augmented_lagrangian_hessian_sparsity !=
817 vtable.default_get_augmented_lagrangian_hessian_sparsity;
818 }
819 /// Returns true if the problem provides a specialized implementation of
820 /// @ref eval_objective_and_gradient, false if it uses the default implementation.
821 [[nodiscard]] bool provides_eval_objective_and_gradient() const {
822 return vtable.eval_objective_and_gradient != vtable.default_eval_objective_and_gradient;
823 }
824 /// Returns true if the problem provides a specialized implementation of
825 /// @ref eval_objective_and_constraints, false if it uses the default implementation.
826 [[nodiscard]] bool provides_eval_objective_and_constraints() const {
827 return vtable.eval_objective_and_constraints !=
828 vtable.default_eval_objective_and_constraints;
829 }
830 /// Returns true if the problem provides a specialized implementation of
831 /// @ref eval_objective_gradient_and_constraints_gradient_product, false if it uses the default implementation.
833 return vtable.eval_objective_gradient_and_constraints_gradient_product !=
834 vtable.default_eval_objective_gradient_and_constraints_gradient_product;
835 }
836 /// Returns true if the problem provides a specialized implementation of
837 /// @ref eval_lagrangian_gradient, false if it uses the default implementation.
838 [[nodiscard]] bool provides_eval_lagrangian_gradient() const {
839 return vtable.eval_lagrangian_gradient != vtable.default_eval_lagrangian_gradient;
840 }
841 /// Returns true if the problem provides a specialized implementation of
842 /// @ref eval_augmented_lagrangian, false if it uses the default implementation.
843 [[nodiscard]] bool provides_eval_augmented_lagrangian() const {
844 return vtable.eval_augmented_lagrangian != vtable.default_eval_augmented_lagrangian;
845 }
846 /// Returns true if the problem provides a specialized implementation of
847 /// @ref eval_augmented_lagrangian_gradient, false if it uses the default implementation.
849 return vtable.eval_augmented_lagrangian_gradient !=
850 vtable.default_eval_augmented_lagrangian_gradient;
851 }
852 /// Returns true if the problem provides a specialized implementation of
853 /// @ref eval_augmented_lagrangian_and_gradient, false if it uses the default implementation.
855 return vtable.eval_augmented_lagrangian_and_gradient !=
856 vtable.default_eval_augmented_lagrangian_and_gradient;
857 }
858 /// Returns true if the problem provides an implementation of
859 /// @ref get_variable_bounds.
860 [[nodiscard]] bool provides_get_variable_bounds() const {
861 return vtable.get_variable_bounds != vtable.default_get_variable_bounds;
862 }
863 /// Returns true if the problem provides an implementation of
864 /// @ref get_general_bounds.
865 [[nodiscard]] bool provides_get_general_bounds() const {
866 return vtable.get_general_bounds != vtable.default_get_general_bounds;
867 }
868 /// Returns true if the problem provides an implementation of @ref check.
869 [[nodiscard]] bool provides_check() const { return vtable.check != vtable.default_check; }
870 /// Returns true if the problem provides an implementation of @ref get_name.
871 [[nodiscard]] bool provides_get_name() const {
872 return vtable.get_name != vtable.default_get_name;
873 }
874
875 /// @}
876
877 /// @name Querying available functions
878 /// @{
879
880 /// Returns true if @ref eval_augmented_lagrangian_hessian_product can be called.
885 /// Returns true if @ref eval_augmented_lagrangian_hessian can be called.
888 (vtable.m == 0 && provides_eval_lagrangian_hessian());
889 }
890
891 /// @}
892
893 /// @name Helpers
894 /// @{
895
896 /// Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be
897 /// used to compute ψ(x) and ∇ψ(x).
898 ///
899 /// Computes the result using the following algorithm:
900 /// @f[ \begin{aligned}
901 /// \zeta &= g(x) + \Sigma^{-1} y \\[]
902 /// d &= \zeta - \Pi_D(\zeta)
903 /// = \operatorname{eval\_proj\_diff\_g}(\zeta, \zeta) \\[]
904 /// \hat y &= \Sigma d \\[]
905 /// \end{aligned} @f]
906 /// @see @ref page_math
907 ///
908 /// @param[inout] g_ŷ
909 /// Input @f$ g(x) @f$, outputs @f$ \hat y @f$
910 /// @param[in] y
911 /// Lagrange multipliers @f$ y @f$
912 /// @param[in] Σ
913 /// Penalty weights @f$ \Sigma @f$
914 /// @return The inner product @f$ d^\top \hat y @f$
916
917 /// @}
918};
919
920/// @}
921
922#ifndef DOXYGEN
923template <class Tref>
924explicit TypeErasedProblem(Tref &&d)
926
927template <class Tref, class Allocator>
928explicit TypeErasedProblem(Tref &&d, Allocator alloc)
930#endif
931
932template <Config Conf, class Allocator>
934 return vtable.n;
935}
936template <Config Conf, class Allocator>
938 return vtable.m;
939}
940
941template <Config Conf, class Allocator>
943 rvec e) const {
944 return call(vtable.eval_projecting_difference_constraints, z, e);
945}
946template <Config Conf, class Allocator>
948 return call(vtable.eval_projection_multipliers, y, M);
949}
950template <Config Conf, class Allocator>
952 crvec grad_ψ, rvec x̂,
953 rvec p) const -> real_t {
954 return call(vtable.eval_proximal_gradient_step, γ, x, grad_ψ, x̂, p);
955}
956template <Config Conf, class Allocator>
958 crvec grad_ψ,
959 rindexvec J) const
960 -> index_t {
961 return call(vtable.eval_inactive_indices_res_lna, γ, x, grad_ψ, J);
962}
963template <Config Conf, class Allocator>
965 rvec J_diag) const {
966 return call(vtable.eval_prox_jacobian_diag, γ, x, J_diag);
967}
968template <Config Conf, class Allocator>
970 return call(vtable.eval_nonsmooth_objective, x);
971}
972template <Config Conf, class Allocator>
974 return call(vtable.eval_objective, x);
975}
976template <Config Conf, class Allocator>
978 return call(vtable.eval_objective_gradient, x, grad_fx);
979}
980template <Config Conf, class Allocator>
982 return call(vtable.eval_constraints, x, gx);
983}
984template <Config Conf, class Allocator>
986 rvec grad_gxy) const {
987 return call(vtable.eval_constraints_gradient_product, x, y, grad_gxy);
988}
989template <Config Conf, class Allocator>
991 return call(vtable.eval_grad_gi, x, i, grad_gi);
992}
993template <Config Conf, class Allocator>
995 return call(vtable.eval_constraints_jacobian, x, J_values);
996}
997template <Config Conf, class Allocator>
999 return call(vtable.get_constraints_jacobian_sparsity);
1000}
1001template <Config Conf, class Allocator>
1003 real_t scale, crvec v,
1004 rvec Hv) const {
1005 return call(vtable.eval_lagrangian_hessian_product, x, y, scale, v, Hv);
1006}
1007template <Config Conf, class Allocator>
1009 rvec H_values) const {
1010 return call(vtable.eval_lagrangian_hessian, x, y, scale, H_values);
1011}
1012template <Config Conf, class Allocator>
1014 return call(vtable.get_lagrangian_hessian_sparsity);
1015}
1016template <Config Conf, class Allocator>
1018 crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const {
1019 return call(vtable.eval_augmented_lagrangian_hessian_product, x, y, Σ, scale, v, Hv);
1020}
1021template <Config Conf, class Allocator>
1023 crvec Σ, real_t scale,
1024 rvec H_values) const {
1025 return call(vtable.eval_augmented_lagrangian_hessian, x, y, Σ, scale, H_values);
1026}
1027template <Config Conf, class Allocator>
1029 -> Sparsity {
1030 return call(vtable.get_augmented_lagrangian_hessian_sparsity);
1031}
1032template <Config Conf, class Allocator>
1034 -> real_t {
1035 return call(vtable.eval_objective_and_gradient, x, grad_fx);
1036}
1037template <Config Conf, class Allocator>
1039 -> real_t {
1040 return call(vtable.eval_objective_and_constraints, x, g);
1041}
1042template <Config Conf, class Allocator>
1044 crvec x, crvec y, rvec grad_f, rvec grad_gxy) const {
1045 return call(vtable.eval_objective_gradient_and_constraints_gradient_product, x, y, grad_f,
1046 grad_gxy);
1047}
1048template <Config Conf, class Allocator>
1050 rvec work_n) const {
1051 return call(vtable.eval_lagrangian_gradient, x, y, grad_L, work_n);
1052}
1053template <Config Conf, class Allocator>
1055 rvec ŷ) const -> real_t {
1056 return call(vtable.eval_augmented_lagrangian, x, y, Σ, ŷ);
1057}
1058template <Config Conf, class Allocator>
1060 crvec Σ, rvec grad_ψ,
1061 rvec work_n,
1062 rvec work_m) const {
1063 return call(vtable.eval_augmented_lagrangian_gradient, x, y, Σ, grad_ψ, work_n, work_m);
1064}
1065template <Config Conf, class Allocator>
1067 crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const -> real_t {
1068 return call(vtable.eval_augmented_lagrangian_and_gradient, x, y, Σ, grad_ψ, work_n, work_m);
1069}
1070template <Config Conf, class Allocator>
1072 return call(vtable.calc_ŷ_dᵀŷ, g_ŷ, y, Σ);
1073}
1074template <Config Conf, class Allocator>
1076 return call(vtable.get_variable_bounds);
1077}
1078template <Config Conf, class Allocator>
1080 return call(vtable.get_general_bounds);
1081}
1082template <Config Conf, class Allocator>
1084 return call(vtable.check);
1085}
1086template <Config Conf, class Allocator>
1088 return call(vtable.get_name);
1089}
1090
1091/// @addtogroup grp_Problems
1092/// @{
1093
1094template <Config Conf>
1095void print_provided_functions(std::ostream &os, const TypeErasedProblem<Conf> &problem) {
1096 // clang-format off
1097 os << " eval_nonsmooth_objective: " << problem.provides_eval_nonsmooth_objective() << '\n'
1098 << " eval_inactive_indices_res_lna: " << problem.provides_eval_inactive_indices_res_lna() << '\n'
1099 << " eval_prox_jacobian_diag: " << problem.provides_eval_prox_jacobian_diag() << '\n'
1100 << " eval_grad_gi: " << problem.provides_eval_grad_gi() << '\n'
1101 << " eval_constraints_jacobian: " << problem.provides_eval_constraints_jacobian() << '\n'
1102 << " eval_lagrangian_hessian_product: " << problem.provides_eval_lagrangian_hessian_product() << '\n'
1103 << " eval_lagrangian_hessian: " << problem.provides_eval_lagrangian_hessian() << '\n'
1104 << " eval_augmented_lagrangian_hessian_product: " << problem.provides_eval_augmented_lagrangian_hessian_product() << '\n'
1105 << " eval_augmented_lagrangian_hessian: " << problem.provides_eval_augmented_lagrangian_hessian() << '\n'
1106 << " eval_objective_and_gradient: " << problem.provides_eval_objective_and_gradient() << '\n'
1107 << " eval_objective_and_constraints: " << problem.provides_eval_objective_and_constraints() << '\n'
1108 << " eval_objective_gradient_and_constraints_gradient_product: " << problem.provides_eval_objective_gradient_and_constraints_gradient_product() << '\n'
1109 << " eval_lagrangian_gradient: " << problem.provides_eval_lagrangian_gradient() << '\n'
1110 << " eval_augmented_lagrangian: " << problem.provides_eval_augmented_lagrangian() << '\n'
1111 << " eval_augmented_lagrangian_gradient: " << problem.provides_eval_augmented_lagrangian_gradient() << '\n'
1112 << " eval_augmented_lagrangian_and_gradient: " << problem.provides_eval_augmented_lagrangian_and_gradient() << '\n'
1113 << " get_variable_bounds: " << problem.provides_get_variable_bounds() << '\n'
1114 << " get_general_bounds: " << problem.provides_get_general_bounds() << '\n'
1115 << " check: " << problem.provides_check() << '\n'
1116 << " get_name: " << problem.provides_get_name() << '\n';
1117 // clang-format on
1118}
1119
1120/// @}
1121
1122} // namespace alpaqa
The main polymorphic minimization problem interface.
void eval_constraints(crvec x, rvec gx) const
[Required] Function that evaluates the constraints,
std::string get_name() const
[Optional] Get a descriptive name for the problem.
bool provides_eval_constraints_jacobian() const
Returns true if the problem provides an implementation of eval_constraints_jacobian.
Sparsity get_lagrangian_hessian_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Hessian of the Lagrangian.
void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const
[Optional] Function that evaluates the gradient of one specific constraint,
void eval_lagrangian_hessian_product(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const
[Optional] Function that evaluates the Hessian of the Lagrangian multiplied by a vector,
real_t eval_augmented_lagrangian_and_gradient(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const
[Optional] Calculate both ψ(x) and its gradient ∇ψ(x).
void eval_lagrangian_hessian(crvec x, crvec y, real_t scale, rvec H_values) const
[Optional] Function that evaluates the nonzero values of the Hessian of the Lagrangian,
bool provides_eval_augmented_lagrangian() const
Returns true if the problem provides a specialized implementation of eval_augmented_lagrangian,...
real_t eval_objective_and_gradient(crvec x, rvec grad_fx) const
[Optional] Evaluate both and its gradient, .
void eval_projection_multipliers(rvec y, real_t M) const
[Required] Function that projects the Lagrange multipliers for ALM.
bool provides_check() const
Returns true if the problem provides an implementation of check.
void check() const
[Optional] Check that the problem formulation is well-defined, the dimensions match,...
bool provides_eval_objective_and_gradient() const
Returns true if the problem provides a specialized implementation of eval_objective_and_gradient,...
bool provides_eval_augmented_lagrangian_and_gradient() const
Returns true if the problem provides a specialized implementation of eval_augmented_lagrangian_and_gr...
const Box & get_variable_bounds() const
[Optional] Get the rectangular constraint set of the decision variables, .
void eval_constraints_jacobian(crvec x, rvec J_values) const
[Optional] Function that evaluates the nonzero values of the Jacobian matrix of the constraints,
bool provides_eval_inactive_indices_res_lna() const
Returns true if the problem provides an implementation of eval_inactive_indices_res_lna.
bool provides_get_name() const
Returns true if the problem provides an implementation of get_name.
void eval_objective_gradient_and_constraints_gradient_product(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const
[Optional] Evaluate both and .
bool provides_eval_objective_gradient_and_constraints_gradient_product() const
Returns true if the problem provides a specialized implementation of eval_objective_gradient_and_cons...
bool provides_eval_objective_and_constraints() const
Returns true if the problem provides a specialized implementation of eval_objective_and_constraints,...
real_t eval_objective(crvec x) const
[Required] Function that evaluates the cost,
real_t eval_nonsmooth_objective(crvec x) const
[Optional] Function that evaluates the non-smooth term of the cost .
length_t get_num_constraints() const
[Required] Number of constraints.
void eval_augmented_lagrangian_hessian_product(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const
[Optional] Function that evaluates the Hessian of the augmented Lagrangian multiplied by a vector,
bool provides_get_lagrangian_hessian_sparsity() const
Returns true if the problem provides an implementation of get_lagrangian_hessian_sparsity.
static TypeErasedProblem make(Args &&...args)
index_t eval_inactive_indices_res_lna(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const
[Optional] Function that computes the inactive indices for the evaluation of the linear Newton appro...
bool supports_eval_augmented_lagrangian_hessian() const
Returns true if eval_augmented_lagrangian_hessian can be called.
bool provides_eval_lagrangian_hessian_product() const
Returns true if the problem provides an implementation of eval_lagrangian_hessian_product.
bool provides_get_constraints_jacobian_sparsity() const
Returns true if the problem provides an implementation of get_constraints_jacobian_sparsity.
length_t get_num_variables() const
[Required] Number of decision variables.
void eval_lagrangian_gradient(crvec x, crvec y, rvec grad_L, rvec work_n) const
[Optional] Evaluate the gradient of the Lagrangian
Sparsity get_constraints_jacobian_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Jacobian of the constraints.
void eval_augmented_lagrangian_gradient(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const
[Optional] Calculate the gradient ∇ψ(x).
const Box & get_general_bounds() const
[Optional] Get the rectangular constraint set of the general constraint function, .
void eval_augmented_lagrangian_hessian(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const
[Optional] Function that evaluates the nonzero values of the Hessian of the augmented Lagrangian,
bool provides_eval_augmented_lagrangian_hessian() const
Returns true if the problem provides an implementation of eval_augmented_lagrangian_hessian.
real_t eval_objective_and_constraints(crvec x, rvec g) const
[Optional] Evaluate both and .
bool provides_eval_augmented_lagrangian_gradient() const
Returns true if the problem provides a specialized implementation of eval_augmented_lagrangian_gradie...
bool provides_get_variable_bounds() const
Returns true if the problem provides an implementation of get_variable_bounds.
bool provides_eval_grad_gi() const
Returns true if the problem provides an implementation of eval_grad_gi.
guanaqo::TypeErased< VTable, allocator_type > TypeErased
void eval_projecting_difference_constraints(crvec z, rvec e) const
[Required] Function that evaluates the difference between the given point and its projection onto th...
void eval_objective_gradient(crvec x, rvec grad_fx) const
[Required] Function that evaluates the gradient of the cost,
bool provides_eval_augmented_lagrangian_hessian_product() const
Returns true if the problem provides an implementation of eval_augmented_lagrangian_hessian_product.
bool provides_get_general_bounds() const
Returns true if the problem provides an implementation of get_general_bounds.
bool provides_eval_lagrangian_hessian() const
Returns true if the problem provides an implementation of eval_lagrangian_hessian.
void eval_constraints_gradient_product(crvec x, crvec y, rvec grad_gxy) const
[Required] Function that evaluates the gradient of the constraints times a vector,
bool provides_get_augmented_lagrangian_hessian_sparsity() const
Returns true if the problem provides an implementation of get_augmented_lagrangian_hessian_sparsity.
real_t calc_ŷ_dᵀŷ(rvec g_ŷ, crvec y, crvec Σ) const
Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be used to compute ψ(x) and ∇ψ(...
bool provides_eval_prox_jacobian_diag() const
Returns true if the problem provides an implementation of eval_prox_jacobian_diag.
Sparsity get_augmented_lagrangian_hessian_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Hessian of the augmented Lag...
real_t eval_augmented_lagrangian(crvec x, crvec y, crvec Σ, rvec ŷ) const
[Optional] Calculate both ψ(x) and the vector ŷ that can later be used to compute ∇ψ.
void eval_prox_jacobian_diag(real_t γ, crvec x, rvec J_diag) const
[Optional] Function that computes the diagonal Jacobian of the proximal mapping of .
bool provides_eval_lagrangian_gradient() const
Returns true if the problem provides a specialized implementation of eval_lagrangian_gradient,...
bool provides_eval_nonsmooth_objective() const
Returns true if the problem provides an implementation of eval_nonsmooth_objective.
bool supports_eval_augmented_lagrangian_hessian_product() const
Returns true if eval_augmented_lagrangian_hessian_product can be called.
real_t eval_proximal_gradient_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const
[Required] Function that computes a proximal gradient step.
#define USING_ALPAQA_CONFIG(Conf)
Definition config.hpp:77
#define ALPAQA_IF_QUADF(...)
Definition config.hpp:223
#define ALPAQA_IF_LONGD(...)
Definition config.hpp:235
#define ALPAQA_IF_FLOAT(...)
Definition config.hpp:229
#define ALPAQA_EXPORT_EXTERN_TEMPLATE(...)
Definition export.hpp:21
void print_provided_functions(std::ostream &os, const TypeErasedProblem< Conf > &problem)
typename Conf::real_t real_t
Definition config.hpp:86
typename Conf::rindexvec rindexvec
Definition config.hpp:106
typename Conf::index_t index_t
Definition config.hpp:104
typename Conf::length_t length_t
Definition config.hpp:103
typename Conf::rvec rvec
Definition config.hpp:91
typename Conf::crvec crvec
Definition config.hpp:92
Double-precision double configuration.
Definition config.hpp:176
Single-precision float configuration.
Definition config.hpp:172
long double configuration.
Definition config.hpp:181
Struct containing function pointers to all problem functions (like the objective and constraint funct...
static std::string default_get_name(const void *, const ProblemVTable &)
optional_function_t< void(crvec x, rvec J_values) const > eval_constraints_jacobian
guanaqo::required_function_t< F > required_function_t
static Sparsity default_get_lagrangian_hessian_sparsity(const void *, const ProblemVTable &vtable)
optional_function_t< void() const > check
optional_function_t< void(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const > eval_augmented_lagrangian_hessian_product
guanaqo::optional_function_t< F, ProblemVTable > optional_function_t
optional_function_t< Sparsity() const > get_lagrangian_hessian_sparsity
optional_function_t< Sparsity() const > get_constraints_jacobian_sparsity
static void default_eval_lagrangian_hessian_product(const void *, crvec, crvec, real_t, crvec, rvec, const ProblemVTable &)
static void default_eval_prox_jacobian_diag(const void *, real_t, crvec, rvec, const ProblemVTable &)
static const Box & default_get_variable_bounds(const void *, const ProblemVTable &)
optional_function_t< real_t(crvec x) const > eval_nonsmooth_objective
optional_function_t< void(crvec x, crvec y, real_t scale, rvec H_values) const > eval_lagrangian_hessian
static real_t default_eval_nonsmooth_objective(const void *, crvec, const ProblemVTable &)
static index_t default_eval_inactive_indices_res_lna(const void *, real_t, crvec, crvec, rindexvec, const ProblemVTable &)
optional_function_t< void(crvec x, index_t i, rvec grad_gi) const > eval_grad_gi
required_function_t< void(rvec y, real_t M) const > eval_projection_multipliers
optional_function_t< void(real_t γ, crvec x, rvec J_diag) const > eval_prox_jacobian_diag
ProblemVTable(std::in_place_t, P &p)
static void default_eval_grad_gi(const void *, crvec, index_t, rvec, const ProblemVTable &)
optional_function_t< real_t(crvec x, rvec grad_fx) const > eval_objective_and_gradient
static real_t default_eval_objective_and_constraints(const void *self, crvec x, rvec g, const ProblemVTable &vtable)
static void default_eval_augmented_lagrangian_hessian_product(const void *self, crvec x, crvec y, crvec, real_t scale, crvec v, rvec Hv, const ProblemVTable &vtable)
required_function_t< void(crvec x, rvec grad_fx) const > eval_objective_gradient
static real_t default_eval_augmented_lagrangian_and_gradient(const void *self, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const > eval_augmented_lagrangian_gradient
optional_function_t< const Box &() const > get_variable_bounds
required_function_t< real_t(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const > eval_proximal_gradient_step
static real_t default_eval_objective_and_gradient(const void *self, crvec x, rvec grad_fx, const ProblemVTable &vtable)
required_function_t< void(crvec z, rvec e) const > eval_projecting_difference_constraints
optional_function_t< void(crvec x, crvec y, rvec grad_L, rvec work_n) const > eval_lagrangian_gradient
static void default_eval_objective_gradient_and_constraints_gradient_product(const void *self, crvec x, crvec y, rvec grad_f, rvec grad_gxy, const ProblemVTable &vtable)
optional_function_t< real_t(crvec x, crvec y, crvec Σ, rvec ŷ) const > eval_augmented_lagrangian
optional_function_t< void(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const > eval_augmented_lagrangian_hessian
static void default_eval_augmented_lagrangian_gradient(const void *self, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const > eval_objective_gradient_and_constraints_gradient_product
required_function_t< void(crvec x, rvec gx) const > eval_constraints
static void default_eval_augmented_lagrangian_hessian(const void *self, crvec x, crvec y, crvec, real_t scale, rvec H_values, const ProblemVTable &vtable)
optional_function_t< Sparsity() const > get_augmented_lagrangian_hessian_sparsity
required_function_t< void(crvec x, crvec y, rvec grad_gxy) const > eval_constraints_gradient_product
static const Box & default_get_general_bounds(const void *, const ProblemVTable &)
static real_t default_eval_augmented_lagrangian(const void *self, crvec x, crvec y, crvec Σ, rvec ŷ, const ProblemVTable &vtable)
alpaqa::Box< config_t > Box
optional_function_t< std::string() const > get_name
static real_t calc_ŷ_dᵀŷ(const void *self, rvec g_ŷ, crvec y, crvec Σ, const ProblemVTable &vtable)
static Sparsity default_get_augmented_lagrangian_hessian_sparsity(const void *self, const ProblemVTable &vtable)
required_function_t< real_t(crvec x) const > eval_objective
optional_function_t< void(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const > eval_lagrangian_hessian_product
optional_function_t< const Box &() const > get_general_bounds
static void default_check(const void *, const ProblemVTable &)
static Sparsity default_get_constraints_jacobian_sparsity(const void *, const ProblemVTable &vtable)
static void default_eval_constraints_jacobian(const void *, crvec, rvec, const ProblemVTable &vtable)
optional_function_t< index_t(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const > eval_inactive_indices_res_lna
optional_function_t< real_t(crvec x, rvec g) const > eval_objective_and_constraints
optional_function_t< real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const > eval_augmented_lagrangian_and_gradient
static void default_eval_lagrangian_hessian(const void *, crvec, crvec, real_t, rvec, const ProblemVTable &)
static void default_eval_lagrangian_gradient(const void *self, crvec x, crvec y, rvec grad_L, rvec work_n, const ProblemVTable &vtable)
#define ALPAQA_SUPPRESS_NONEXPORTED_BASE_WARNING_END
#define ALPAQA_SUPPRESS_NONEXPORTED_BASE_WARNING_START