alpaqa sparse
Nonconvex constrained optimization
type-erased-problem.hpp
1#pragma once
2
4#include <alpaqa/export.hpp>
12#include <chrono>
13#include <stdexcept>
14#include <type_traits>
15#include <utility>
16
17namespace alpaqa {
18
19/// Struct containing function pointers to all problem functions (like the
20/// objective and constraint functions, with their derivatives, and more).
21/// Some default implementations are available.
22/// Internal struct, used by @ref TypeErasedProblem.
23template <Config Conf>
28
29 template <class F>
31 template <class F>
34
35 // clang-format off
36
37 // Required
54
55 // Second order
74
75 // Combined evaluations
82
83 // Lagrangian and augmented Lagrangian evaluations
84 optional_const_function_t<void(crvec x, crvec y, rvec grad_L, rvec work_n)>
85     eval_grad_L;
86 optional_const_function_t<real_t(crvec x, crvec y, crvec Σ, rvec ŷ)>
87     eval_ψ;
88 optional_const_function_t<void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m)>
89     eval_grad_ψ;
90 optional_const_function_t<real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m)>
91     eval_ψ_grad_ψ;
92
93 // Constraint sets
98
99 // Check
102
103 // clang-format on
104
105 ALPAQA_EXPORT static real_t calc_ŷ_dᵀŷ(const void *self, rvec g_ŷ, crvec y, crvec Σ,
106                                         const ProblemVTable &vtable);
107 ALPAQA_EXPORT static index_t default_eval_inactive_indices_res_lna(const void *, real_t, crvec,
108                                                                    crvec, rindexvec,
109                                                                    const ProblemVTable &);
110 ALPAQA_EXPORT static void default_eval_jac_g(const void *, crvec, rvec, const ProblemVTable &);
111 ALPAQA_EXPORT static Sparsity default_get_jac_g_sparsity(const void *, const ProblemVTable &);
112 ALPAQA_EXPORT static void default_eval_grad_gi(const void *, crvec, index_t, rvec,
113                                                const ProblemVTable &);
114 ALPAQA_EXPORT static void default_eval_hess_L_prod(const void *, crvec, crvec, real_t, crvec,
115 rvec, const ProblemVTable &);
116 ALPAQA_EXPORT static void default_eval_hess_L(const void *, crvec, crvec, real_t, rvec,
117 const ProblemVTable &);
118 ALPAQA_EXPORT static Sparsity default_get_hess_L_sparsity(const void *, const ProblemVTable &);
119 ALPAQA_EXPORT static void default_eval_hess_ψ_prod(const void *self, crvec x, crvec y, crvec,
120                                                    real_t scale, crvec v, rvec Hv,
121                                                    const ProblemVTable &vtable);
122 ALPAQA_EXPORT static void default_eval_hess_ψ(const void *self, crvec x, crvec y, crvec,
123                                               real_t scale, rvec H_values,
124                                               const ProblemVTable &vtable);
125 ALPAQA_EXPORT static Sparsity default_get_hess_ψ_sparsity(const void *, const ProblemVTable &);
126 ALPAQA_EXPORT static real_t default_eval_f_grad_f(const void *self, crvec x, rvec grad_fx,
127 const ProblemVTable &vtable);
128 ALPAQA_EXPORT static real_t default_eval_f_g(const void *self, crvec x, rvec g,
129 const ProblemVTable &vtable);
130 ALPAQA_EXPORT static void default_eval_grad_f_grad_g_prod(const void *self, crvec x, crvec y,
131 rvec grad_f, rvec grad_gxy,
132 const ProblemVTable &vtable);
133 ALPAQA_EXPORT static void default_eval_grad_L(const void *self, crvec x, crvec y, rvec grad_L,
134 rvec work_n, const ProblemVTable &vtable);
135 ALPAQA_EXPORT static real_t default_eval_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec ŷ,
136 const ProblemVTable &vtable);
137 ALPAQA_EXPORT static void default_eval_grad_ψ(const void *self, crvec x, crvec y, crvec Σ,
138 rvec grad_ψ, rvec work_n, rvec work_m,
139 const ProblemVTable &vtable);
140 ALPAQA_EXPORT static real_t default_eval_ψ_grad_ψ(const void *self, crvec x, crvec y, crvec Σ,
141 rvec grad_ψ, rvec work_n, rvec work_m,
142 const ProblemVTable &vtable);
143 ALPAQA_EXPORT static const Box &default_get_box_C(const void *, const ProblemVTable &);
144 ALPAQA_EXPORT static const Box &default_get_box_D(const void *, const ProblemVTable &);
145 ALPAQA_EXPORT static void default_check(const void *, const ProblemVTable &);
146
148
149 template <class P>
150 ProblemVTable(std::in_place_t, P &p) : util::BasicVTable{std::in_place, p} {
151 auto &vtable = *this;
152
153 // Initialize all methods
154
155 // Required
164 // Second order
174 // Combined evaluations
176 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, eval_f_g, p);
178 // Lagrangian and augmented Lagrangian evaluations
180 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, eval_ψ, p);
183 // Constraint sets
186 // Check
187 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, check, p);
188
189 // Dimensions
190 vtable.n = p.get_n();
191 vtable.m = p.get_m();
192 }
193 ProblemVTable() = default;
194};
195
196// clang-format off
201// clang-format on
202
203/// @addtogroup grp_Problems
204/// @{
205
206/// The main polymorphic minimization problem interface.
207///
208/// This class wraps the actual problem implementation class, filling in the
209/// missing member functions with sensible defaults, and providing a uniform
210/// interface that is used by the solvers.
211///
212/// The problem implementations do not inherit from an abstract base class.
213/// Instead, [structural typing](https://en.wikipedia.org/wiki/Structural_type_system)
214/// is used. The @ref ProblemVTable constructor uses reflection to discover
215/// which member functions are provided by the problem implementation. See
216/// @ref page-problem-formulations for more information, and
217/// @ref C++/CustomCppProblem/main.cpp for an example.
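/// For illustration, a minimal problem implementation might look as follows.
/// This is only a condensed sketch (see the example referenced above for a
/// complete program): the struct @c MyProblem and its toy objective and
/// constraint are made up here, and most real problems will also implement
/// some of the optional functions documented below.
/// @code{.cpp}
/// #include <alpaqa/problem/type-erased-problem.hpp>
///
/// // Toy problem: minimize ‖x‖² subject to sum(x) ∈ D = {1}, with h ≡ 0.
/// struct MyProblem {
///     USING_ALPAQA_CONFIG(alpaqa::DefaultConfig);
///
///     length_t get_n() const { return 2; } // number of decision variables
///     length_t get_m() const { return 1; } // number of constraints
///
///     real_t eval_f(crvec x) const { return x.squaredNorm(); }
///     void eval_grad_f(crvec x, rvec gr) const { gr = 2 * x; }
///     void eval_g(crvec x, rvec gx) const { gx(0) = x.sum(); }
///     void eval_grad_g_prod(crvec, crvec y, rvec gr) const { gr.setConstant(y(0)); }
///     void eval_proj_diff_g(crvec z, rvec e) const { e(0) = z(0) - 1; } // Π_D(z) = 1
///     void eval_proj_multipliers(rvec y, real_t M) const {
///         y = y.cwiseMax(-M).cwiseMin(M);
///     }
///     real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const {
///         x̂ = x - γ * grad_ψ; // h ≡ 0, so the proximal operator is the identity
///         p = x̂ - x;
///         return 0;
///     }
/// };
///
/// // Wrap it in the type-erased interface used by the solvers:
/// auto problem = alpaqa::TypeErasedProblem<>::make<MyProblem>();
/// @endcode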
218template <Config Conf = DefaultConfig, class Allocator = std::allocator<std::byte>>
220 public:
227 using TypeErased::TypeErased;
228
229 protected:
230 using TypeErased::call;
231 using TypeErased::self;
232 using TypeErased::vtable;
233
234 public:
235 template <class T, class... Args>
237 return TypeErased::template make<TypeErasedProblem, T>(std::forward<Args>(args)...);
238 }
239
240 /// @name Problem dimensions
241 /// @{
242
243 /// **[Required]**
244 /// Number of decision variables.
246 /// **[Required]**
247 /// Number of constraints.
249
250 /// @}
251
252 /// @name Required cost and constraint functions
253 /// @{
254
255 /// **[Required]**
256 /// Function that evaluates the cost, @f$ f(x) @f$
257 /// @param [in] x
258 /// Decision variable @f$ x \in \R^n @f$
260 /// **[Required]**
261 /// Function that evaluates the gradient of the cost, @f$ \nabla f(x) @f$
262 /// @param [in] x
263 /// Decision variable @f$ x \in \R^n @f$
264 /// @param [out] grad_fx
265 /// Gradient of cost function @f$ \nabla f(x) \in \R^n @f$
267 /// **[Required]**
268 /// Function that evaluates the constraints, @f$ g(x) @f$
269 /// @param [in] x
270 /// Decision variable @f$ x \in \R^n @f$
271 /// @param [out] gx
272 /// Value of the constraints @f$ g(x) \in \R^m @f$
273 void eval_g(crvec x, rvec gx) const;
274 /// **[Required]**
275 /// Function that evaluates the gradient of the constraints times a vector,
276 /// @f$ \nabla g(x)\,y = \tp{\jac_g(x)}y @f$
277 /// @param [in] x
278 /// Decision variable @f$ x \in \R^n @f$
279 /// @param [in] y
280 /// Vector @f$ y \in \R^m @f$ to multiply the gradient by
281 /// @param [out] grad_gxy
282 /// Gradient of the constraints
283 /// @f$ \nabla g(x)\,y \in \R^n @f$
285
286 /// @}
287
288 /// @name Projections onto constraint sets and proximal mappings
289 /// @{
290
291 /// **[Required]**
292 /// Function that evaluates the difference between the given point @f$ z @f$
293 /// and its projection onto the constraint set @f$ D @f$.
294 /// @param [in] z
295 /// Slack variable, @f$ z \in \R^m @f$
296 /// @param [out] e
297 /// The difference relative to its projection,
298 /// @f$ e = z - \Pi_D(z) \in \R^m @f$
299 /// @note @p z and @p e can refer to the same vector.
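    /// For a box @f$ D @f$ with bounds stored in vectors, a sketch of an
    /// implementation (the members @c D_lb and @c D_ub are illustrative, not
    /// part of this interface):
    /// @code{.cpp}
    /// void eval_proj_diff_g(crvec z, rvec e) const {
    ///     e = z - z.cwiseMax(D_lb).cwiseMin(D_ub); // e = z - Π_D(z)
    /// }
    /// @endcode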
300 void eval_proj_diff_g(crvec z, rvec e) const;
301 /// **[Required]**
302 /// Function that projects the Lagrange multipliers for ALM.
303 /// @param [inout] y
304 /// Multipliers, @f$ y \leftarrow \Pi_Y(y) \in \R^m @f$
305 /// @param [in] M
306 /// The radius/size of the set @f$ Y @f$.
307 /// See @ref ALMParams::max_multiplier.
309 /// **[Required]**
310 /// Function that computes a proximal gradient step.
311 /// @param [in] γ
312 /// Step size, @f$ \gamma \in \R_{>0} @f$
313 /// @param [in] x
314 /// Decision variable @f$ x \in \R^n @f$
315 /// @param [in] grad_ψ
316 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
317 /// @param [out] x̂
318 /// Next proximal gradient iterate, @f$ \hat x = T_\gamma(x) =
319 /// \prox_{\gamma h}(x - \gamma\nabla\psi(x)) \in \R^n @f$
320 /// @param [out] p
321 /// The proximal gradient step,
322 /// @f$ p = \hat x - x \in \R^n @f$
323 /// @return The nonsmooth function evaluated at x̂,
324 /// @f$ h(\hat x) @f$.
325 /// @note The vector @f$ p @f$ is often used in stopping criteria, so its
326 /// numerical accuracy is more important than that of @f$ \hat x @f$.
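    /// For the common case where @f$ h @f$ is the indicator of a box @f$ C @f$,
    /// a sketch of an implementation (the members @c C_lb and @c C_ub are
    /// illustrative, not part of this interface):
    /// @code{.cpp}
    /// real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const {
    ///     x̂ = (x - γ * grad_ψ).cwiseMax(C_lb).cwiseMin(C_ub); // Π_C(x - γ∇ψ(x))
    ///     p = x̂ - x;
    ///     return 0; // indicator of C is zero at x̂ ∈ C
    /// }
    /// @endcode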
327 real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const;
328 /// **[Optional]**
329 /// Function that computes the inactive indices @f$ \mathcal J(x) @f$ for
330 /// the evaluation of the linear Newton approximation of the residual, as in
331 /// @cite pas2022alpaqa.
332 /// @param [in] γ
333 /// Step size, @f$ \gamma \in \R_{>0} @f$
334 /// @param [in] x
335 /// Decision variable @f$ x \in \R^n @f$
336 /// @param [in] grad_ψ
337 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
338 /// @param [out] J
339 /// The indices of the components of @f$ x @f$ that are in the
340 /// index set @f$ \mathcal J(x) @f$. In ascending order, at most n.
341 /// @return The number of inactive constraints, @f$ \# \mathcal J(x) @f$.
342 ///
343 /// For example, in the case of box constraints, we have
344 /// @f[ \mathcal J(x) \defeq \defset{i \in \N_{[0, n-1]}}{\underline x_i
345 /// \lt x_i - \gamma\nabla_{\!x_i}\psi(x) \lt \overline x_i}. @f]
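    ///
    /// A sketch for this box-constrained case (again with illustrative bound
    /// members @c C_lb and @c C_ub):
    /// @code{.cpp}
    /// index_t eval_inactive_indices_res_lna(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const {
    ///     index_t nJ = 0;
    ///     for (index_t i = 0; i < get_n(); ++i) {
    ///         real_t xi = x(i) - γ * grad_ψ(i);
    ///         if (C_lb(i) < xi && xi < C_ub(i))
    ///             J(nJ++) = i; // component i is inactive
    ///     }
    ///     return nJ;
    /// }
    /// @endcode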
346 index_t eval_inactive_indices_res_lna(real_t γ, crvec x, crvec grad_ψ,
347                                       rindexvec J) const;
348
349 /// @}
350
351 /// @name Constraint sets
352 /// @{
353
354 /// **[Optional]**
355 /// Get the rectangular constraint set of the decision variables,
356 /// @f$ x \in C @f$.
357 [[nodiscard]] const Box &get_box_C() const;
358 /// **[Optional]**
359 /// Get the rectangular constraint set of the general constraint function,
360 /// @f$ g(x) \in D @f$.
361 [[nodiscard]] const Box &get_box_D() const;
362
363 /// @}
364
365 /// @name Functions for second-order solvers
366 /// @{
367
368 /// **[Optional]**
369 /// Function that evaluates the nonzero values of the Jacobian matrix of the
370 /// constraints, @f$ \jac_g(x) @f$
371 /// @param [in] x
372 /// Decision variable @f$ x \in \R^n @f$
373 /// @param [out] J_values
374 /// Nonzero values of the Jacobian
375 /// @f$ \jac_g(x) \in \R^{m\times n} @f$
376 ///
377 /// Required for second-order solvers only.
379 /// **[Optional]**
380 /// Function that returns (a view of) the sparsity pattern of the Jacobian
381 /// of the constraints.
382 ///
383 /// Required for second-order solvers only.
385 /// **[Optional]**
386 /// Function that evaluates the gradient of one specific constraint,
387 /// @f$ \nabla g_i(x) @f$
388 /// @param [in] x
389 /// Decision variable @f$ x \in \R^n @f$
390 /// @param [in] i
391 /// Which constraint @f$ 0 \le i \lt m @f$
392 /// @param [out] grad_gi
393 /// Gradient of the constraint
394 /// @f$ \nabla g_i(x) \in \R^n @f$
395 ///
396 /// Required for second-order solvers only.
397 void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const;
398 /// **[Optional]**
399 /// Function that evaluates the Hessian of the Lagrangian multiplied by a
400 /// vector,
401 /// @f$ \nabla_{xx}^2L(x, y)\,v @f$
402 /// @param [in] x
403 /// Decision variable @f$ x \in \R^n @f$
404 /// @param [in] y
405 /// Lagrange multipliers @f$ y \in \R^m @f$
406 /// @param [in] scale
407 /// Scale factor for the cost function.
408 /// @param [in] v
409 /// Vector to multiply by @f$ v \in \R^n @f$
410 /// @param [out] Hv
411 /// Hessian-vector product
412 /// @f$ \nabla_{xx}^2 L(x, y)\,v \in \R^{n} @f$
413 ///
414 /// Required for second-order solvers only.
416 /// **[Optional]**
417 /// Function that evaluates the nonzero values of the Hessian of the
418 /// Lagrangian, @f$ \nabla_{xx}^2L(x, y) @f$
419 /// @param [in] x
420 /// Decision variable @f$ x \in \R^n @f$
421 /// @param [in] y
422 /// Lagrange multipliers @f$ y \in \R^m @f$
423 /// @param [in] scale
424 /// Scale factor for the cost function.
425 /// @param [out] H_values
426 /// Nonzero values of the Hessian
427 /// @f$ \nabla_{xx}^2 L(x, y) \in \R^{n\times n} @f$.
428 ///
429 /// Required for second-order solvers only.
431 /// **[Optional]**
432 /// Function that returns (a view of) the sparsity pattern of the Hessian of
433 /// the Lagrangian.
434 ///
435 /// Required for second-order solvers only.
437 /// **[Optional]**
438 /// Function that evaluates the Hessian of the augmented Lagrangian
439 /// multiplied by a vector,
440 /// @f$ \nabla_{xx}^2L_\Sigma(x, y)\,v @f$
441 /// @param [in] x
442 /// Decision variable @f$ x \in \R^n @f$
443 /// @param [in] y
444 /// Lagrange multipliers @f$ y \in \R^m @f$
445 /// @param [in] Σ
446 /// Penalty weights @f$ \Sigma @f$
447 /// @param [in] scale
448 /// Scale factor for the cost function.
449 /// @param [in] v
450 /// Vector to multiply by @f$ v \in \R^n @f$
451 /// @param [out] Hv
452 /// Hessian-vector product
453 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y)\,v \in \R^{n} @f$
454 ///
455 /// Required for second-order solvers only.
457 /// **[Optional]**
458 /// Function that evaluates the nonzero values of the Hessian of the
459 /// augmented Lagrangian, @f$ \nabla_{xx}^2L_\Sigma(x, y) @f$
460 /// @param [in] x
461 /// Decision variable @f$ x \in \R^n @f$
462 /// @param [in] y
463 /// Lagrange multipliers @f$ y \in \R^m @f$
464 /// @param [in] Σ
465 /// Penalty weights @f$ \Sigma @f$
466 /// @param [in] scale
467 /// Scale factor for the cost function.
468 /// @param [out] H_values
469 /// Nonzero values of the Hessian
470 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y) \in \R^{n\times n} @f$
471 ///
472 /// Required for second-order solvers only.
474 /// **[Optional]**
475 /// Function that returns (a view of) the sparsity pattern of the Hessian of
476 /// the augmented Lagrangian.
477 ///
478 /// Required for second-order solvers only.
480
481 /// @}
482
483 /// @name Combined evaluations
484 /// @{
485
486 /// **[Optional]**
487 /// Evaluate both @f$ f(x) @f$ and its gradient, @f$ \nabla f(x) @f$.
488 /// @default_impl ProblemVTable::default_eval_f_grad_f
490 /// **[Optional]**
491 /// Evaluate both @f$ f(x) @f$ and @f$ g(x) @f$.
492 /// @default_impl ProblemVTable::default_eval_f_g
494 /// **[Optional]**
495 /// Evaluate both @f$ \nabla f(x) @f$ and @f$ \nabla g(x)\,y @f$.
496 /// @default_impl ProblemVTable::default_eval_grad_f_grad_g_prod
498 /// **[Optional]**
499 /// Evaluate the gradient of the Lagrangian
500 /// @f$ \nabla_x L(x, y) = \nabla f(x) + \nabla g(x)\,y @f$
501 /// @default_impl ProblemVTable::default_eval_grad_L
502 void eval_grad_L(crvec x, crvec y, rvec grad_L, rvec work_n) const;
503
504 /// @}
505
506 /// @name Augmented Lagrangian
507 /// @{
508
509 /// **[Optional]**
510 /// Calculate both ψ(x) and the vector ŷ that can later be used to compute
511 /// ∇ψ.
512 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
513 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
514 /// @f[ \hat y = \Sigma\, \left(g(x) + \Sigma^{-1}y - \Pi_D\left(g(x)
515 /// + \Sigma^{-1}y\right)\right) @f]
516 /// @default_impl ProblemVTable::default_eval_ψ
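    ///
    /// In terms of the functions above, the default implementation corresponds
    /// roughly to the following sketch (not the literal library code):
    /// @code{.cpp}
    /// real_t eval_ψ(crvec x, crvec y, crvec Σ, rvec ŷ) const {
    ///     eval_g(x, ŷ);                     // ŷ ← g(x)
    ///     real_t dᵀŷ = calc_ŷ_dᵀŷ(ŷ, y, Σ); // ŷ ← Σ(ζ - Π_D(ζ)), returns dᵀŷ
    ///     return eval_f(x) + real_t(0.5) * dᵀŷ;
    /// }
    /// @endcode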
517 [[nodiscard]] real_t eval_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
518 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
519 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
520 rvec ŷ ///< [out] @f$ \hat y @f$
521 ) const;
522 /// **[Optional]**
523 /// Calculate the gradient ∇ψ(x).
524 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
525 /// @default_impl ProblemVTable::default_eval_grad_ψ
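    ///
    /// A sketch of how a default implementation can combine the building
    /// blocks, and of what the work vectors are used for (not the literal
    /// library code):
    /// @code{.cpp}
    /// void eval_grad_ψ(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const {
    ///     eval_g(x, work_m);                      // work_m ← g(x)
    ///     calc_ŷ_dᵀŷ(work_m, y, Σ);               // work_m ← ŷ
    ///     eval_grad_L(x, work_m, grad_ψ, work_n); // ∇ψ(x) = ∇f(x) + ∇g(x) ŷ
    /// }
    /// @endcode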
526 void eval_grad_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
527 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
528 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
529 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
530 rvec work_n, ///< Dimension @f$ n @f$
531 rvec work_m ///< Dimension @f$ m @f$
532 ) const;
533 /// **[Optional]**
534 /// Calculate both ψ(x) and its gradient ∇ψ(x).
535 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
536 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
537 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
538 /// @default_impl ProblemVTable::default_eval_ψ_grad_ψ
539 [[nodiscard]] real_t eval_ψ_grad_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
540 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
541 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
542 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
543 rvec work_n, ///< Dimension @f$ n @f$
544 rvec work_m ///< Dimension @f$ m @f$
545 ) const;
546
547 /// @}
548
549 /// @name Checks
550 /// @{
551
552 /// **[Optional]**
553 /// Check that the problem formulation is well-defined, the dimensions match,
554 /// etc. Throws an exception if this is not the case.
555 void check() const;
556
557 /// @}
558
559 /// @name Querying specialized implementations
560 /// @{
561
562 /// Returns true if the problem provides an implementation of
563 /// @ref eval_inactive_indices_res_lna.
564 [[nodiscard]] bool provides_eval_inactive_indices_res_lna() const {
565 return vtable.eval_inactive_indices_res_lna != vtable.default_eval_inactive_indices_res_lna;
566 }
567 /// Returns true if the problem provides an implementation of
568 /// @ref eval_jac_g.
569 [[nodiscard]] bool provides_eval_jac_g() const {
570 return vtable.eval_jac_g != vtable.default_eval_jac_g;
571 }
572 /// Returns true if the problem provides an implementation of
573 /// @ref get_jac_g_sparsity.
574 [[nodiscard]] bool provides_get_jac_g_sparsity() const {
575 return vtable.get_jac_g_sparsity != vtable.default_get_jac_g_sparsity;
576 }
577 /// Returns true if the problem provides an implementation of
578 /// @ref eval_grad_gi.
579 [[nodiscard]] bool provides_eval_grad_gi() const {
580 return vtable.eval_grad_gi != vtable.default_eval_grad_gi;
581 }
582 /// Returns true if the problem provides an implementation of
583 /// @ref eval_hess_L_prod.
584 [[nodiscard]] bool provides_eval_hess_L_prod() const {
585 return vtable.eval_hess_L_prod != vtable.default_eval_hess_L_prod;
586 }
587 /// Returns true if the problem provides an implementation of
588 /// @ref eval_hess_L.
589 [[nodiscard]] bool provides_eval_hess_L() const {
590 return vtable.eval_hess_L != vtable.default_eval_hess_L;
591 }
592 /// Returns true if the problem provides an implementation of
593 /// @ref get_hess_L_sparsity.
594 [[nodiscard]] bool provides_get_hess_L_sparsity() const {
595 return vtable.get_hess_L_sparsity != vtable.default_get_hess_L_sparsity;
596 }
597 /// Returns true if the problem provides an implementation of
598 /// @ref eval_hess_ψ_prod.
599 [[nodiscard]] bool provides_eval_hess_ψ_prod() const {
600 return vtable.eval_hess_ψ_prod != vtable.default_eval_hess_ψ_prod;
601 }
602 /// Returns true if the problem provides an implementation of
603 /// @ref eval_hess_ψ.
604 [[nodiscard]] bool provides_eval_hess_ψ() const {
605 return vtable.eval_hess_ψ != vtable.default_eval_hess_ψ;
606 }
607 /// Returns true if the problem provides an implementation of
608 /// @ref get_hess_ψ_sparsity.
609 [[nodiscard]] bool provides_get_hess_ψ_sparsity() const {
610 return vtable.get_hess_ψ_sparsity != vtable.default_get_hess_ψ_sparsity;
611 }
612 /// Returns true if the problem provides a specialized implementation of
613 /// @ref eval_f_grad_f, false if it uses the default implementation.
614 [[nodiscard]] bool provides_eval_f_grad_f() const {
615 return vtable.eval_f_grad_f != vtable.default_eval_f_grad_f;
616 }
617 /// Returns true if the problem provides a specialized implementation of
618 /// @ref eval_f_g, false if it uses the default implementation.
619 [[nodiscard]] bool provides_eval_f_g() const {
620 return vtable.eval_f_g != vtable.default_eval_f_g;
621 }
622 /// Returns true if the problem provides a specialized implementation of
623 /// @ref eval_grad_f_grad_g_prod, false if it uses the default implementation.
624 [[nodiscard]] bool provides_eval_grad_f_grad_g_prod() const {
625 return vtable.eval_grad_f_grad_g_prod != vtable.default_eval_grad_f_grad_g_prod;
626 }
627 /// Returns true if the problem provides a specialized implementation of
628 /// @ref eval_grad_L, false if it uses the default implementation.
629 [[nodiscard]] bool provides_eval_grad_L() const {
630 return vtable.eval_grad_L != vtable.default_eval_grad_L;
631 }
632 /// Returns true if the problem provides a specialized implementation of
633 /// @ref eval_ψ, false if it uses the default implementation.
634 [[nodiscard]] bool provides_eval_ψ() const { return vtable.eval_ψ != vtable.default_eval_ψ; }
635 /// Returns true if the problem provides a specialized implementation of
636 /// @ref eval_grad_ψ, false if it uses the default implementation.
637 [[nodiscard]] bool provides_eval_grad_ψ() const {
638 return vtable.eval_grad_ψ != vtable.default_eval_grad_ψ;
639 }
640 /// Returns true if the problem provides a specialized implementation of
641 /// @ref eval_ψ_grad_ψ, false if it uses the default implementation.
642 [[nodiscard]] bool provides_eval_ψ_grad_ψ() const {
643 return vtable.eval_ψ_grad_ψ != vtable.default_eval_ψ_grad_ψ;
644 }
645 /// Returns true if the problem provides an implementation of
646 /// @ref get_box_C.
647 [[nodiscard]] bool provides_get_box_C() const {
648 return vtable.get_box_C != vtable.default_get_box_C;
649 }
650 /// Returns true if the problem provides an implementation of
651 /// @ref get_box_D.
652 [[nodiscard]] bool provides_get_box_D() const {
653 return vtable.get_box_D != vtable.default_get_box_D;
654 }
655 /// Returns true if the problem provides an implementation of @ref check.
656 [[nodiscard]] bool provides_check() const { return vtable.check != vtable.default_check; }
657
658 /// @}
659
660 /// @name Helpers
661 /// @{
662
663 /// Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be
664 /// used to compute ψ(x) and ∇ψ(x).
665 ///
666 /// Computes the result using the following algorithm:
667 /// @f[ \begin{aligned}
668 /// \zeta &= g(x) + \Sigma^{-1} y \\[]
669 /// d &= \zeta - \Pi_D(\zeta)
670 /// = \operatorname{eval\_proj\_diff\_g}(\zeta, \zeta) \\[]
671 /// \hat y &= \Sigma d \\[]
672 /// \end{aligned} @f]
673 /// @see @ref page_math
674 ///
675 /// @param[inout] g_ŷ
676 /// Input @f$ g(x) @f$, outputs @f$ \hat y @f$
677 /// @param[in] y
678 /// Lagrange multipliers @f$ y @f$
679 /// @param[in] Σ
680 /// Penalty weights @f$ \Sigma @f$
681 /// @return The inner product @f$ d^\top \hat y @f$
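    ///
    /// In vector notation, the computation amounts to the following sketch,
    /// where @c ζ and @c d are temporaries of length @f$ m @f$:
    /// @code{.cpp}
    /// vec ζ = g_ŷ + y.cwiseQuotient(Σ); // ζ = g(x) + Σ⁻¹ y
    /// vec d(get_m());
    /// eval_proj_diff_g(ζ, d);           // d = ζ - Π_D(ζ)
    /// g_ŷ = Σ.cwiseProduct(d);          // ŷ = Σ d
    /// real_t dᵀŷ = d.dot(g_ŷ);          // returned inner product
    /// @endcode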
682 real_t calc_ŷ_dᵀŷ(rvec g_ŷ, crvec y, crvec Σ) const;
683
684 /// @}
685};
686
687/// @}
688
689#ifndef DOXYGEN
690template <class Tref>
691explicit TypeErasedProblem(Tref &&d)
693
694template <class Tref, class Allocator>
697#endif
698
699template <Config Conf, class Allocator>
701 return vtable.n;
702}
703template <Config Conf, class Allocator>
705 return vtable.m;
706}
707
708template <Config Conf, class Allocator>
710 return call(vtable.eval_proj_diff_g, z, e);
711}
712template <Config Conf, class Allocator>
714 return call(vtable.eval_proj_multipliers, y, M);
715}
716template <Config Conf, class Allocator>
718 rvec x̂, rvec p) const -> real_t {
719 return call(vtable.eval_prox_grad_step, γ, x, grad_ψ, x̂, p);
720}
721template <Config Conf, class Allocator>
723 crvec grad_ψ,
724 rindexvec J) const
725 -> index_t {
726 return call(vtable.eval_inactive_indices_res_lna, γ, x, grad_ψ, J);
727}
728template <Config Conf, class Allocator>
730 return call(vtable.eval_f, x);
731}
732template <Config Conf, class Allocator>
734 return call(vtable.eval_grad_f, x, grad_fx);
735}
736template <Config Conf, class Allocator>
738 return call(vtable.eval_g, x, gx);
739}
740template <Config Conf, class Allocator>
742 return call(vtable.eval_grad_g_prod, x, y, grad_gxy);
743}
744template <Config Conf, class Allocator>
746 return call(vtable.eval_grad_gi, x, i, grad_gi);
747}
748template <Config Conf, class Allocator>
750 return call(vtable.eval_jac_g, x, J_values);
751}
752template <Config Conf, class Allocator>
754 return call(vtable.get_jac_g_sparsity);
755}
756template <Config Conf, class Allocator>
758 rvec Hv) const {
759 return call(vtable.eval_hess_L_prod, x, y, scale, v, Hv);
760}
761template <Config Conf, class Allocator>
763 rvec H_values) const {
764 return call(vtable.eval_hess_L, x, y, scale, H_values);
765}
766template <Config Conf, class Allocator>
768 return call(vtable.get_hess_L_sparsity);
769}
770template <Config Conf, class Allocator>
772 crvec v, rvec Hv) const {
773 return call(vtable.eval_hess_ψ_prod, x, y, Σ, scale, v, Hv);
774}
775template <Config Conf, class Allocator>
777 rvec H_values) const {
778 return call(vtable.eval_hess_ψ, x, y, Σ, scale, H_values);
779}
780template <Config Conf, class Allocator>
782 return call(vtable.get_hess_ψ_sparsity);
783}
784template <Config Conf, class Allocator>
786 return call(vtable.eval_f_grad_f, x, grad_fx);
787}
788template <Config Conf, class Allocator>
790 return call(vtable.eval_f_g, x, g);
791}
792template <Config Conf, class Allocator>
794 rvec grad_gxy) const {
795 return call(vtable.eval_grad_f_grad_g_prod, x, y, grad_f, grad_gxy);
796}
797template <Config Conf, class Allocator>
799 rvec work_n) const {
800 return call(vtable.eval_grad_L, x, y, grad_L, work_n);
801}
802template <Config Conf, class Allocator>
804 return call(vtable.eval_ψ, x, y, Σ, ŷ);
805}
806template <Config Conf, class Allocator>
808 rvec work_n, rvec work_m) const {
809 return call(vtable.eval_grad_ψ, x, y, Σ, grad_ψ, work_n, work_m);
810}
811template <Config Conf, class Allocator>
813 rvec work_n, rvec work_m) const -> real_t {
814 return call(vtable.eval_ψ_grad_ψ, x, y, Σ, grad_ψ, work_n, work_m);
815}
816template <Config Conf, class Allocator>
818 return call(vtable.calc_ŷ_dᵀŷ, g_ŷ, y, Σ);
819}
820template <Config Conf, class Allocator>
822 return call(vtable.get_box_C);
823}
824template <Config Conf, class Allocator>
826 return call(vtable.get_box_D);
827}
828template <Config Conf, class Allocator>
830 return call(vtable.check);
831}
832
833/// @addtogroup grp_Problems
834/// @{
835
836template <Config Conf>
837void print_provided_functions(std::ostream &os, const TypeErasedProblem<Conf> &problem) {
838 os << "inactive_indices_res_lna: " << problem.provides_eval_inactive_indices_res_lna() << '\n'
839 << " grad_gi: " << problem.provides_eval_grad_gi() << '\n'
840 << " jac_g: " << problem.provides_eval_jac_g() << '\n'
841 << " hess_L_prod: " << problem.provides_eval_hess_L_prod() << '\n'
842 << " hess_L: " << problem.provides_eval_hess_L() << '\n'
843 << " hess_ψ_prod: " << problem.provides_eval_hess_ψ_prod() << '\n'
844 << " hess_ψ: " << problem.provides_eval_hess_ψ() << '\n'
845 << " f_grad_f: " << problem.provides_eval_f_grad_f() << '\n'
846 << " f_g: " << problem.provides_eval_f_g() << '\n'
847 << " grad_f_grad_g_prod: " << problem.provides_eval_grad_f_grad_g_prod() << '\n'
848 << " grad_L: " << problem.provides_eval_grad_L() << '\n'
849 << " ψ: " << problem.provides_eval_ψ() << '\n'
850 << " grad_ψ: " << problem.provides_eval_grad_ψ() << '\n'
851 << " ψ_grad_ψ: " << problem.provides_eval_ψ_grad_ψ() << '\n'
852 << " get_box_C: " << problem.provides_get_box_C() << '\n'
853 << " get_box_D: " << problem.provides_get_box_D() << '\n'
854 << " check: " << problem.provides_check() << '\n';
855}
856
857/// @}
858
859} // namespace alpaqa