alpaqa 1.0.0a15
Nonconvex constrained optimization
Loading...
Searching...
No Matches
type-erased-problem.hpp
Go to the documentation of this file.
1#pragma once
2
4#include <alpaqa/export.hpp>
12#include <chrono>
13#include <stdexcept>
14#include <type_traits>
15#include <utility>
16
17namespace alpaqa {
18
19/// Struct containing function pointers to all problem functions (like the
20/// objective and constraint functions, with their derivatives, and more).
21/// Some default implementations are available.
22/// Internal struct, it is used by @ref TypeErasedProblem.
23template <Config Conf>
28
29 template <class F>
31
32 // clang-format off
33
34 // Required
39 required_function_t<real_t(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const>
51
52 // Second order
57 optional_function_t<void(crvec x, index_t i, rvec grad_gi) const>
71
72 // Combined evaluations
79
80 // Lagrangian and augmented lagrangian evaluations
81 optional_function_t<void(crvec x, crvec y, rvec grad_L, rvec work_n) const>
85 optional_function_t<void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const>
87 optional_function_t<real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const>
89
90 // Constraint sets
95
96 // Check
99
100 // clang-format on
101
103 const ProblemVTable &vtable);
118 const ProblemVTable &vtable);
121 const ProblemVTable &vtable);
124 const ProblemVTable &vtable);
126 const ProblemVTable &vtable);
128 rvec grad_f, rvec grad_gxy,
129 const ProblemVTable &vtable);
130 ALPAQA_EXPORT static void default_eval_grad_L(const void *self, crvec x, crvec y, rvec grad_L,
131 rvec work_n, const ProblemVTable &vtable);
133 const ProblemVTable &vtable);
135 rvec grad_ψ, rvec work_n, rvec work_m,
136 const ProblemVTable &vtable);
138 rvec grad_ψ, rvec work_n, rvec work_m,
139 const ProblemVTable &vtable);
143
145
146 template <class P>
147 ProblemVTable(std::in_place_t, P &p) : util::BasicVTable{std::in_place, p} {
148 auto &vtable = *this;
149
150 // Initialize all methods
151
152 // Required
161 // Second order
171 // Combined evaluations
173 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, eval_f_g, p);
175 // Lagrangian and augmented lagrangian evaluations
177 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, eval_ψ, p);
180 // Constraint set
183 // Check
184 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, check, p);
185
186 // Dimensions
187 vtable.n = p.get_n();
188 vtable.m = p.get_m();
189 }
190 ProblemVTable() = default;
191};
192
193// clang-format off
198// clang-format on
199
200/// @addtogroup grp_Problems
201/// @{
202
203/// The main polymorphic minimization problem interface.
204///
205/// This class wraps the actual problem implementation class, filling in the
206/// missing member functions with sensible defaults, and providing a uniform
207/// interface that is used by the solvers.
208///
209/// The problem implementations do not inherit from an abstract base class.
210/// Instead, [structural typing](https://en.wikipedia.org/wiki/Structural_type_system)
211/// is used. The @ref ProblemVTable constructor uses reflection to discover
212/// which member functions are provided by the problem implementation. See
213/// @ref page-problem-formulations for more information, and
214/// @ref C++/CustomCppProblem/main.cpp for an example.
215template <Config Conf = DefaultConfig, class Allocator = std::allocator<std::byte>>
217 public:
224 using TypeErased::TypeErased;
225
226 protected:
227 using TypeErased::call;
228 using TypeErased::self;
229 using TypeErased::vtable;
230
231 public:
232 template <class T, class... Args>
234 return TypeErased::template make<TypeErasedProblem, T>(std::forward<Args>(args)...);
235 }
236
237 /// @name Problem dimensions
238 /// @{
239
240 /// **[Required]**
241 /// Number of decision variables.
243 /// **[Required]**
244 /// Number of constraints.
246
247 /// @}
248
249 /// @name Required cost and constraint functions
250 /// @{
251
252 /// **[Required]**
253 /// Function that evaluates the cost, @f$ f(x) @f$
254 /// @param [in] x
255 /// Decision variable @f$ x \in \R^n @f$
257 /// **[Required]**
258 /// Function that evaluates the gradient of the cost, @f$ \nabla f(x) @f$
259 /// @param [in] x
260 /// Decision variable @f$ x \in \R^n @f$
261 /// @param [out] grad_fx
262 /// Gradient of cost function @f$ \nabla f(x) \in \R^n @f$
264 /// **[Required]**
265 /// Function that evaluates the constraints, @f$ g(x) @f$
266 /// @param [in] x
267 /// Decision variable @f$ x \in \R^n @f$
268 /// @param [out] gx
269 /// Value of the constraints @f$ g(x) \in \R^m @f$
270 void eval_g(crvec x, rvec gx) const;
271 /// **[Required]**
272 /// Function that evaluates the gradient of the constraints times a vector,
273 /// @f$ \nabla g(x)\,y = \tp{\jac_g(x)}y @f$
274 /// @param [in] x
275 /// Decision variable @f$ x \in \R^n @f$
276 /// @param [in] y
277 /// Vector @f$ y \in \R^m @f$ to multiply the gradient by
278 /// @param [out] grad_gxy
279 /// Gradient of the constraints
280 /// @f$ \nabla g(x)\,y \in \R^n @f$
282
283 /// @}
284
285 /// @name Projections onto constraint sets and proximal mappings
286 /// @{
287
288 /// **[Required]**
289 /// Function that evaluates the difference between the given point @f$ z @f$
290 /// and its projection onto the constraint set @f$ D @f$.
291 /// @param [in] z
292 /// Slack variable, @f$ z \in \R^m @f$
293 /// @param [out] e
294 /// The difference relative to its projection,
295 /// @f$ e = z - \Pi_D(z) \in \R^m @f$
296 /// @note @p z and @p e can refer to the same vector.
297 void eval_proj_diff_g(crvec z, rvec e) const;
298 /// **[Required]**
299 /// Function that projects the Lagrange multipliers for ALM.
300 /// @param [inout] y
301 /// Multipliers, @f$ y \leftarrow \Pi_Y(y) \in \R^m @f$
302 /// @param [in] M
303 /// The radius/size of the set @f$ Y @f$.
304 /// See @ref ALMParams::max_multiplier.
306 /// **[Required]**
307 /// Function that computes a proximal gradient step.
308 /// @param [in] γ
309 /// Step size, @f$ \gamma \in \R_{>0} @f$
310 /// @param [in] x
311 /// Decision variable @f$ x \in \R^n @f$
312 /// @param [in] grad_ψ
313 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
314 /// @param [out] x̂
315 /// Next proximal gradient iterate, @f$ \hat x = T_\gamma(x) =
316 /// \prox_{\gamma h}(x - \gamma\nabla\psi(x)) \in \R^n @f$
317 /// @param [out] p
318 /// The proximal gradient step,
319 /// @f$ p = \hat x - x \in \R^n @f$
320 /// @return The nonsmooth function evaluated at x̂,
321 /// @f$ h(\hat x) @f$.
322 /// @note The vector @f$ p @f$ is often used in stopping criteria, so its
323 /// numerical accuracy is more important than that of @f$ \hat x @f$.
324 real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const;
325 /// **[Optional]**
326 /// Function that computes the inactive indices @f$ \mathcal J(x) @f$ for
327 /// the evaluation of the linear Newton approximation of the residual, as in
328 /// @cite pas2022alpaqa.
329 /// @param [in] γ
330 /// Step size, @f$ \gamma \in \R_{>0} @f$
331 /// @param [in] x
332 /// Decision variable @f$ x \in \R^n @f$
333 /// @param [in] grad_ψ
334 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
335 /// @param [out] J
336 /// The indices of the components of @f$ x @f$ that are in the
337 /// index set @f$ \mathcal J(x) @f$. In ascending order, at most n.
338 /// @return The number of inactive constraints, @f$ \# \mathcal J(x) @f$.
339 ///
340 /// For example, in the case of box constraints, we have
341 /// @f[ \mathcal J(x) \defeq \defset{i \in \N_{[0, n-1]}}{\underline x_i
342 /// \lt x_i - \gamma\nabla_{\!x_i}\psi(x) \lt \overline x_i}. @f]
344 rindexvec J) const;
345
346 /// @}
347
348 /// @name Constraint sets
349 /// @{
350
351 /// **[Optional]**
352 /// Get the rectangular constraint set of the decision variables,
353 /// @f$ x \in C @f$.
354 [[nodiscard]] const Box &get_box_C() const;
355 /// **[Optional]**
356 /// Get the rectangular constraint set of the general constraint function,
357 /// @f$ g(x) \in D @f$.
358 [[nodiscard]] const Box &get_box_D() const;
359
360 /// @}
361
362 /// @name Functions for second-order solvers
363 /// @{
364
365 /// **[Optional]**
366 /// Function that evaluates the nonzero values of the Jacobian matrix of the
367 /// constraints, @f$ \jac_g(x) @f$
368 /// @param [in] x
369 /// Decision variable @f$ x \in \R^n @f$
370 /// @param [out] J_values
371 /// Nonzero values of the Jacobian
372 /// @f$ \jac_g(x) \in \R^{m\times n} @f$
373 ///
374 /// Required for second-order solvers only.
376 /// **[Optional]**
377 /// Function that returns (a view of) the sparsity pattern of the Jacobian
378 /// of the constraints.
379 ///
380 /// Required for second-order solvers only.
382 /// **[Optional]**
383 /// Function that evaluates the gradient of one specific constraint,
384 /// @f$ \nabla g_i(x) @f$
385 /// @param [in] x
386 /// Decision variable @f$ x \in \R^n @f$
387 /// @param [in] i
388 /// Which constraint @f$ 0 \le i \lt m @f$
389 /// @param [out] grad_gi
390 /// Gradient of the constraint
391 /// @f$ \nabla g_i(x) \in \R^n @f$
392 ///
393 /// Required for second-order solvers only.
394 void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const;
395 /// **[Optional]**
396 /// Function that evaluates the Hessian of the Lagrangian multiplied by a
397 /// vector,
398 /// @f$ \nabla_{xx}^2L(x, y)\,v @f$
399 /// @param [in] x
400 /// Decision variable @f$ x \in \R^n @f$
401 /// @param [in] y
402 /// Lagrange multipliers @f$ y \in \R^m @f$
403 /// @param [in] scale
404 /// Scale factor for the cost function.
405 /// @param [in] v
406 /// Vector to multiply by @f$ v \in \R^n @f$
407 /// @param [out] Hv
408 /// Hessian-vector product
409 /// @f$ \nabla_{xx}^2 L(x, y)\,v \in \R^{n} @f$
410 ///
411 /// Required for second-order solvers only.
413 /// **[Optional]**
414 /// Function that evaluates the nonzero values of the Hessian of the
415 /// Lagrangian, @f$ \nabla_{xx}^2L(x, y) @f$
416 /// @param [in] x
417 /// Decision variable @f$ x \in \R^n @f$
418 /// @param [in] y
419 /// Lagrange multipliers @f$ y \in \R^m @f$
420 /// @param [in] scale
421 /// Scale factor for the cost function.
422 /// @param [out] H_values
423 /// Nonzero values of the Hessian
424 /// @f$ \nabla_{xx}^2 L(x, y) \in \R^{n\times n} @f$.
425 ///
426 /// Required for second-order solvers only.
428 /// **[Optional]**
429 /// Function that returns (a view of) the sparsity pattern of the Hessian of
430 /// the Lagrangian.
431 ///
432 /// Required for second-order solvers only.
434 /// **[Optional]**
435 /// Function that evaluates the Hessian of the augmented Lagrangian
436 /// multiplied by a vector,
437 /// @f$ \nabla_{xx}^2L_\Sigma(x, y)\,v @f$
438 /// @param [in] x
439 /// Decision variable @f$ x \in \R^n @f$
440 /// @param [in] y
441 /// Lagrange multipliers @f$ y \in \R^m @f$
442 /// @param [in] Σ
443 /// Penalty weights @f$ \Sigma @f$
444 /// @param [in] scale
445 /// Scale factor for the cost function.
446 /// @param [in] v
447 /// Vector to multiply by @f$ v \in \R^n @f$
448 /// @param [out] Hv
449 /// Hessian-vector product
450 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y)\,v \in \R^{n} @f$
451 ///
452 /// Required for second-order solvers only.
454 /// **[Optional]**
455 /// Function that evaluates the nonzero values of the Hessian of the
456 /// augmented Lagrangian, @f$ \nabla_{xx}^2L_\Sigma(x, y) @f$
457 /// @param [in] x
458 /// Decision variable @f$ x \in \R^n @f$
459 /// @param [in] y
460 /// Lagrange multipliers @f$ y \in \R^m @f$
461 /// @param [in] Σ
462 /// Penalty weights @f$ \Sigma @f$
463 /// @param [in] scale
464 /// Scale factor for the cost function.
465 /// @param [out] H_values
466 /// Nonzero values of the Hessian
467 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y) \in \R^{n\times n} @f$
468 ///
469 /// Required for second-order solvers only.
471 /// **[Optional]**
472 /// Function that returns (a view of) the sparsity pattern of the Hessian of
473 /// the augmented Lagrangian.
474 ///
475 /// Required for second-order solvers only.
477
478 /// @}
479
480 /// @name Combined evaluations
481 /// @{
482
483 /// **[Optional]**
484 /// Evaluate both @f$ f(x) @f$ and its gradient, @f$ \nabla f(x) @f$.
485 /// @default_impl ProblemVTable::default_eval_f_grad_f
487 /// **[Optional]**
488 /// Evaluate both @f$ f(x) @f$ and @f$ g(x) @f$.
489 /// @default_impl ProblemVTable::default_eval_f_g
491 /// **[Optional]**
492 /// Evaluate both @f$ \nabla f(x) @f$ and @f$ \nabla g(x)\,y @f$.
493 /// @default_impl ProblemVTable::default_eval_grad_f_grad_g_prod
495 /// **[Optional]**
496 /// Evaluate the gradient of the Lagrangian
497 /// @f$ \nabla_x L(x, y) = \nabla f(x) + \nabla g(x)\,y @f$
498 /// @default_impl ProblemVTable::default_eval_grad_L
499 void eval_grad_L(crvec x, crvec y, rvec grad_L, rvec work_n) const;
500
501 /// @}
502
503 /// @name Augmented Lagrangian
504 /// @{
505
506 /// **[Optional]**
507 /// Calculate both ψ(x) and the vector ŷ that can later be used to compute
508 /// ∇ψ.
509 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
510 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
511 /// @f[ \hat y = \Sigma\, \left(g(x) + \Sigma^{-1}y - \Pi_D\left(g(x)
512 /// + \Sigma^{-1}y\right)\right) @f]
513 /// @default_impl ProblemVTable::default_eval_ψ
514 [[nodiscard]] real_t eval_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
515 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
516 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
517 rvec ŷ ///< [out] @f$ \hat y @f$
518 ) const;
519 /// **[Optional]**
520 /// Calculate the gradient ∇ψ(x).
521 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
522 /// @default_impl ProblemVTable::default_eval_grad_ψ
523 void eval_grad_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
524 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
525 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
526 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
527 rvec work_n, ///< Dimension @f$ n @f$
528 rvec work_m ///< Dimension @f$ m @f$
529 ) const;
530 /// **[Optional]**
531 /// Calculate both ψ(x) and its gradient ∇ψ(x).
532 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
533 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
534 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
535 /// @default_impl ProblemVTable::default_eval_ψ_grad_ψ
536 [[nodiscard]] real_t eval_ψ_grad_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
537 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
538 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
539 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
540 rvec work_n, ///< Dimension @f$ n @f$
541 rvec work_m ///< Dimension @f$ m @f$
542 ) const;
543
544 /// @}
545
546 /// @name Checks
547 /// @{
548
549 /// **[Optional]**
550 /// Check that the problem formulation is well-defined, the dimensions match,
551 /// etc. Throws an exception if this is not the case.
552 void check() const;
553
554 /// @}
555
556 /// @name Querying specialized implementations
557 /// @{
558
559 /// Returns true if the problem provides an implementation of
560 /// @ref eval_inactive_indices_res_lna.
562 return vtable.eval_inactive_indices_res_lna != vtable.default_eval_inactive_indices_res_lna;
563 }
564 /// Returns true if the problem provides an implementation of
565 /// @ref eval_jac_g.
567 return vtable.eval_jac_g != vtable.default_eval_jac_g;
568 }
569 /// Returns true if the problem provides an implementation of
570 /// @ref get_jac_g_sparsity.
572 return vtable.get_jac_g_sparsity != vtable.default_get_jac_g_sparsity;
573 }
574 /// Returns true if the problem provides an implementation of
575 /// @ref eval_grad_gi.
577 return vtable.eval_grad_gi != vtable.default_eval_grad_gi;
578 }
579 /// Returns true if the problem provides an implementation of
580 /// @ref eval_hess_L_prod.
582 return vtable.eval_hess_L_prod != vtable.default_eval_hess_L_prod;
583 }
584 /// Returns true if the problem provides an implementation of
585 /// @ref eval_hess_L.
587 return vtable.eval_hess_L != vtable.default_eval_hess_L;
588 }
589 /// Returns true if the problem provides an implementation of
590 /// @ref get_hess_L_sparsity.
592 return vtable.get_hess_L_sparsity != vtable.default_get_hess_L_sparsity;
593 }
594 /// Returns true if the problem provides an implementation of
595 /// @ref eval_hess_ψ_prod.
597 return vtable.eval_hess_ψ_prod != vtable.default_eval_hess_ψ_prod;
598 }
599 /// Returns true if the problem provides an implementation of
600 /// @ref eval_hess_ψ.
602 return vtable.eval_hess_ψ != vtable.default_eval_hess_ψ;
603 }
604 /// Returns true if the problem provides an implementation of
605 /// @ref get_hess_ψ_sparsity.
607 return vtable.get_hess_ψ_sparsity != vtable.default_get_hess_ψ_sparsity;
608 }
609 /// Returns true if the problem provides a specialized implementation of
610 /// @ref eval_f_grad_f, false if it uses the default implementation.
612 return vtable.eval_f_grad_f != vtable.default_eval_f_grad_f;
613 }
614 /// Returns true if the problem provides a specialized implementation of
615 /// @ref eval_f_g, false if it uses the default implementation.
616 [[nodiscard]] bool provides_eval_f_g() const {
// The vtable entry points at the default fallback unless the wrapped
// problem type supplied its own eval_f_g (see the ALPAQA_TE_OPTIONAL_METHOD
// registration in the ProblemVTable constructor), so pointer inequality
// indicates a user-provided override.
617 return vtable.eval_f_g != vtable.default_eval_f_g;
618 }
619 /// Returns true if the problem provides a specialized implementation of
620 /// @ref eval_grad_f_grad_g_prod, false if it uses the default implementation.
622 return vtable.eval_grad_f_grad_g_prod != vtable.default_eval_grad_f_grad_g_prod;
623 }
624 /// Returns true if the problem provides a specialized implementation of
625 /// @ref eval_grad_L, false if it uses the default implementation.
627 return vtable.eval_grad_L != vtable.default_eval_grad_L;
628 }
629 /// Returns true if the problem provides a specialized implementation of
630 /// @ref eval_ψ, false if it uses the default implementation.
// Detected by comparing the vtable's eval_ψ pointer against the default
// fallback that is installed when the problem type lacks its own eval_ψ.
631 [[nodiscard]] bool provides_eval_ψ() const { return vtable.eval_ψ != vtable.default_eval_ψ; }
632 /// Returns true if the problem provides a specialized implementation of
633 /// @ref eval_grad_ψ, false if it uses the default implementation.
635 return vtable.eval_grad_ψ != vtable.default_eval_grad_ψ;
636 }
637 /// Returns true if the problem provides a specialized implementation of
638 /// @ref eval_ψ_grad_ψ, false if it uses the default implementation.
640 return vtable.eval_ψ_grad_ψ != vtable.default_eval_ψ_grad_ψ;
641 }
642 /// Returns true if the problem provides an implementation of
643 /// @ref get_box_C.
644 [[nodiscard]] bool provides_get_box_C() const {
// get_box_C is optional: the vtable stores default_get_box_C unless the
// wrapped problem type defines its own, so inequality means it is available.
645 return vtable.get_box_C != vtable.default_get_box_C;
646 }
647 /// Returns true if the problem provides an implementation of
648 /// @ref get_box_D.
649 [[nodiscard]] bool provides_get_box_D() const {
// get_box_D is optional: the vtable stores default_get_box_D unless the
// wrapped problem type defines its own, so inequality means it is available.
650 return vtable.get_box_D != vtable.default_get_box_D;
651 }
652 /// Returns true if the problem provides an implementation of @ref check.
// check() is optional; a vtable entry differing from the default stub means
// the wrapped problem type implements its own validation.
653 [[nodiscard]] bool provides_check() const { return vtable.check != vtable.default_check; }
654
655 /// @}
656
657 /// @name Helpers
658 /// @{
659
660 /// Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be
661 /// used to compute ψ(x) and ∇ψ(x).
662 ///
663 /// Computes the result using the following algorithm:
664 /// @f[ \begin{aligned}
665 /// \zeta &= g(x) + \Sigma^{-1} y \\[]
666 /// d &= \zeta - \Pi_D(\zeta)
667 /// = \operatorname{eval\_proj\_diff\_g}(\zeta, \zeta) \\[]
668 /// \hat y &= \Sigma d \\[]
669 /// \end{aligned} @f]
670 /// @see @ref page_math
671 ///
672 /// @param[inout] g_ŷ
673 /// Input @f$ g(x) @f$, outputs @f$ \hat y @f$
674 /// @param[in] y
675 /// Lagrange multipliers @f$ y @f$
676 /// @param[in] Σ
677 /// Penalty weights @f$ \Sigma @f$
678 /// @return The inner product @f$ d^\top \hat y @f$
680
681 /// @}
682};
683
684/// @}
685
686#ifndef DOXYGEN
687template <class Tref>
688explicit TypeErasedProblem(Tref &&d)
690
691template <class Tref, class Allocator>
694#endif
695
696template <Config Conf, class Allocator>
698 return vtable.n;
699}
700template <Config Conf, class Allocator>
702 return vtable.m;
703}
704
705template <Config Conf, class Allocator>
707 return call(vtable.eval_proj_diff_g, z, e);
708}
709template <Config Conf, class Allocator>
711 return call(vtable.eval_proj_multipliers, y, M);
712}
713template <Config Conf, class Allocator>
715 rvec x̂, rvec p) const -> real_t {
716 return call(vtable.eval_prox_grad_step, γ, x, grad_ψ, x̂, p);
717}
718template <Config Conf, class Allocator>
720 crvec grad_ψ,
721 rindexvec J) const
722 -> index_t {
723 return call(vtable.eval_inactive_indices_res_lna, γ, x, grad_ψ, J);
724}
725template <Config Conf, class Allocator>
727 return call(vtable.eval_f, x);
728}
729template <Config Conf, class Allocator>
731 return call(vtable.eval_grad_f, x, grad_fx);
732}
733template <Config Conf, class Allocator>
735 return call(vtable.eval_g, x, gx);
736}
737template <Config Conf, class Allocator>
739 return call(vtable.eval_grad_g_prod, x, y, grad_gxy);
740}
741template <Config Conf, class Allocator>
743 return call(vtable.eval_grad_gi, x, i, grad_gi);
744}
745template <Config Conf, class Allocator>
747 return call(vtable.eval_jac_g, x, J_values);
748}
749template <Config Conf, class Allocator>
751 return call(vtable.get_jac_g_sparsity);
752}
753template <Config Conf, class Allocator>
755 rvec Hv) const {
756 return call(vtable.eval_hess_L_prod, x, y, scale, v, Hv);
757}
758template <Config Conf, class Allocator>
760 rvec H_values) const {
761 return call(vtable.eval_hess_L, x, y, scale, H_values);
762}
763template <Config Conf, class Allocator>
765 return call(vtable.get_hess_L_sparsity);
766}
767template <Config Conf, class Allocator>
769 crvec v, rvec Hv) const {
770 return call(vtable.eval_hess_ψ_prod, x, y, Σ, scale, v, Hv);
771}
772template <Config Conf, class Allocator>
774 rvec H_values) const {
775 return call(vtable.eval_hess_ψ, x, y, Σ, scale, H_values);
776}
777template <Config Conf, class Allocator>
779 return call(vtable.get_hess_ψ_sparsity);
780}
781template <Config Conf, class Allocator>
783 return call(vtable.eval_f_grad_f, x, grad_fx);
784}
785template <Config Conf, class Allocator>
787 return call(vtable.eval_f_g, x, g);
788}
789template <Config Conf, class Allocator>
791 rvec grad_gxy) const {
792 return call(vtable.eval_grad_f_grad_g_prod, x, y, grad_f, grad_gxy);
793}
794template <Config Conf, class Allocator>
796 rvec work_n) const {
797 return call(vtable.eval_grad_L, x, y, grad_L, work_n);
798}
799template <Config Conf, class Allocator>
801 return call(vtable.eval_ψ, x, y, Σ, ŷ);
802}
803template <Config Conf, class Allocator>
805 rvec work_n, rvec work_m) const {
806 return call(vtable.eval_grad_ψ, x, y, Σ, grad_ψ, work_n, work_m);
807}
808template <Config Conf, class Allocator>
810 rvec work_n, rvec work_m) const -> real_t {
811 return call(vtable.eval_ψ_grad_ψ, x, y, Σ, grad_ψ, work_n, work_m);
812}
813template <Config Conf, class Allocator>
815 return call(vtable.calc_ŷ_dᵀŷ, g_ŷ, y, Σ);
816}
817template <Config Conf, class Allocator>
819 return call(vtable.get_box_C);
820}
821template <Config Conf, class Allocator>
823 return call(vtable.get_box_D);
824}
825template <Config Conf, class Allocator>
827 return call(vtable.check);
828}
829
830/// @addtogroup grp_Problems
831/// @{
832
/// Print a human-readable overview of which optional or specialized problem
/// functions the given @p problem implementation provides (each flag printed
/// as 1) and which fall back to the default implementations (printed as 0).
/// The flags are queried through the problem's provides_* member functions.
/// @param os       Output stream to print to.
/// @param problem  The type-erased problem whose capabilities are reported.
833template <Config Conf>
834void print_provided_functions(std::ostream &os, const TypeErasedProblem<Conf> &problem) {
835 os << "inactive_indices_res_lna: " << problem.provides_eval_inactive_indices_res_lna() << '\n'
836 << " grad_gi: " << problem.provides_eval_grad_gi() << '\n'
837 << " jac_g: " << problem.provides_eval_jac_g() << '\n'
838 << " hess_L_prod: " << problem.provides_eval_hess_L_prod() << '\n'
839 << " hess_L: " << problem.provides_eval_hess_L() << '\n'
840 << " hess_ψ_prod: " << problem.provides_eval_hess_ψ_prod() << '\n'
841 << " hess_ψ: " << problem.provides_eval_hess_ψ() << '\n'
842 << " f_grad_f: " << problem.provides_eval_f_grad_f() << '\n'
843 << " f_g: " << problem.provides_eval_f_g() << '\n'
844 << " grad_f_grad_g_prod: " << problem.provides_eval_grad_f_grad_g_prod() << '\n'
845 << " grad_L: " << problem.provides_eval_grad_L() << '\n'
846 << " ψ: " << problem.provides_eval_ψ() << '\n'
847 << " grad_ψ: " << problem.provides_eval_grad_ψ() << '\n'
848 << " ψ_grad_ψ: " << problem.provides_eval_ψ_grad_ψ() << '\n'
849 << " get_box_C: " << problem.provides_get_box_C() << '\n'
850 << " get_box_D: " << problem.provides_get_box_D() << '\n'
851 << " check: " << problem.provides_check() << '\n';
852}
853
854/// @}
855
856} // namespace alpaqa
The main polymorphic minimization problem interface.
bool provides_eval_hess_L() const
Returns true if the problem provides an implementation of eval_hess_L.
real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const
[Required] Function that computes a proximal gradient step.
real_t eval_ψ_grad_ψ(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const
[Optional] Calculate both ψ(x) and its gradient ∇ψ(x).
bool provides_get_hess_L_sparsity() const
Returns true if the problem provides an implementation of get_hess_L_sparsity.
const Box & get_box_D() const
[Optional] Get the rectangular constraint set of the general constraint function, g(x) ∈ D.
void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const
[Optional] Function that evaluates the gradient of one specific constraint, ∇gᵢ(x).
bool provides_eval_hess_ψ_prod() const
Returns true if the problem provides an implementation of eval_hess_ψ_prod.
bool provides_eval_ψ_grad_ψ() const
Returns true if the problem provides a specialized implementation of eval_ψ_grad_ψ,...
bool provides_get_box_C() const
Returns true if the problem provides an implementation of get_box_C.
void eval_jac_g(crvec x, rvec J_values) const
[Optional] Function that evaluates the nonzero values of the Jacobian matrix of the constraints, jac_g(x).
Sparsity get_jac_g_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Jacobian of the constraints.
real_t eval_f_g(crvec x, rvec g) const
[Optional] Evaluate both f(x) and g(x).
Sparsity get_hess_ψ_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Hessian of the augmented Lag...
bool provides_eval_jac_g() const
Returns true if the problem provides an implementation of eval_jac_g.
bool provides_check() const
Returns true if the problem provides an implementation of check.
length_t get_n() const
[Required] Number of decision variables.
void eval_hess_ψ(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const
[Optional] Function that evaluates the nonzero values of the Hessian of the augmented Lagrangian, ∇²ₓₓL_Σ(x, y).
Sparsity get_hess_L_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Hessian of the Lagrangian.
void check() const
[Optional] Check that the problem formulation is well-defined, the dimensions match,...
length_t get_m() const
[Required] Number of constraints.
real_t eval_ψ(crvec x, crvec y, crvec Σ, rvec ŷ) const
[Optional] Calculate both ψ(x) and the vector ŷ that can later be used to compute ∇ψ.
bool provides_eval_inactive_indices_res_lna() const
Returns true if the problem provides an implementation of eval_inactive_indices_res_lna.
void eval_grad_L(crvec x, crvec y, rvec grad_L, rvec work_n) const
[Optional] Evaluate the gradient of the Lagrangian
void eval_grad_f_grad_g_prod(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const
[Optional] Evaluate both ∇f(x) and ∇g(x) y.
bool provides_eval_grad_f_grad_g_prod() const
Returns true if the problem provides a specialized implementation of eval_grad_f_grad_g_prod,...
static TypeErasedProblem make(Args &&...args)
index_t eval_inactive_indices_res_lna(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const
[Optional] Function that computes the inactive indices for the evaluation of the linear Newton appro...
bool provides_get_hess_ψ_sparsity() const
Returns true if the problem provides an implementation of get_hess_ψ_sparsity.
bool provides_eval_hess_L_prod() const
Returns true if the problem provides an implementation of eval_hess_L_prod.
bool provides_get_jac_g_sparsity() const
Returns true if the problem provides an implementation of get_jac_g_sparsity.
real_t eval_f_grad_f(crvec x, rvec grad_fx) const
[Optional] Evaluate both f(x) and its gradient, ∇f(x).
bool provides_eval_f_grad_f() const
Returns true if the problem provides a specialized implementation of eval_f_grad_f,...
void eval_grad_g_prod(crvec x, crvec y, rvec grad_gxy) const
[Required] Function that evaluates the gradient of the constraints times a vector, ∇g(x) y.
void eval_hess_L_prod(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const
[Optional] Function that evaluates the Hessian of the Lagrangian multiplied by a vector, ∇²ₓₓL(x, y) v.
bool provides_eval_grad_gi() const
Returns true if the problem provides an implementation of eval_grad_gi.
void eval_proj_multipliers(rvec y, real_t M) const
[Required] Function that projects the Lagrange multipliers for ALM.
bool provides_eval_f_g() const
Returns true if the problem provides a specialized implementation of eval_f_g, false if it uses the d...
void eval_grad_f(crvec x, rvec grad_fx) const
[Required] Function that evaluates the gradient of the cost, ∇f(x).
real_t eval_f(crvec x) const
[Required] Function that evaluates the cost, f(x).
bool provides_eval_grad_L() const
Returns true if the problem provides a specialized implementation of eval_grad_L, false if it uses th...
bool provides_eval_grad_ψ() const
Returns true if the problem provides a specialized implementation of eval_grad_ψ, false if it uses th...
void eval_g(crvec x, rvec gx) const
[Required] Function that evaluates the constraints, g(x).
void eval_hess_L(crvec x, crvec y, real_t scale, rvec H_values) const
[Optional] Function that evaluates the nonzero values of the Hessian of the Lagrangian, ∇²ₓₓL(x, y).
bool provides_eval_hess_ψ() const
Returns true if the problem provides an implementation of eval_hess_ψ.
real_t calc_ŷ_dᵀŷ(rvec g_ŷ, crvec y, crvec Σ) const
Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be used to compute ψ(x) and ∇ψ(...
bool provides_get_box_D() const
Returns true if the problem provides an implementation of get_box_D.
const Box & get_box_C() const
[Optional] Get the rectangular constraint set of the decision variables, x ∈ C.
void eval_proj_diff_g(crvec z, rvec e) const
[Required] Function that evaluates the difference between the given point and its projection onto th...
void eval_grad_ψ(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const
[Optional] Calculate the gradient ∇ψ(x).
bool provides_eval_ψ() const
Returns true if the problem provides a specialized implementation of eval_ψ, false if it uses the def...
void eval_hess_ψ_prod(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const
[Optional] Function that evaluates the Hessian of the augmented Lagrangian multiplied by a vector, ∇²ₓₓL_Σ(x, y) v.
Class for polymorphism through type erasure.
#define USING_ALPAQA_CONFIG(Conf)
Definition config.hpp:56
#define ALPAQA_IF_QUADF(...)
Definition config.hpp:182
#define ALPAQA_IF_LONGD(...)
Definition config.hpp:194
#define ALPAQA_IF_FLOAT(...)
Definition config.hpp:188
#define ALPAQA_EXPORT_EXTERN_TEMPLATE(...)
Definition export.hpp:21
void print_provided_functions(std::ostream &os, const TypeErasedProblem< Conf > &problem)
typename Conf::real_t real_t
Definition config.hpp:65
typename Conf::rindexvec rindexvec
Definition config.hpp:79
typename Conf::index_t index_t
Definition config.hpp:77
typename Conf::length_t length_t
Definition config.hpp:76
constexpr const auto inf
Definition config.hpp:85
typename Conf::rvec rvec
Definition config.hpp:69
typename Conf::crvec crvec
Definition config.hpp:70
#define ALPAQA_TE_OPTIONAL_METHOD(vtable, type, member, instance)
#define ALPAQA_TE_REQUIRED_METHOD(vtable, type, member)
Double-precision double configuration.
Definition config.hpp:135
Single-precision float configuration.
Definition config.hpp:131
long double configuration.
Definition config.hpp:140
Struct containing function pointers to all problem functions (like the objective and constraint funct...
optional_function_t< void() const > check
optional_function_t< Sparsity() const > get_hess_ψ_sparsity
static real_t default_eval_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec ŷ, const ProblemVTable &vtable)
required_function_t< void(crvec x, rvec grad_fx) const > eval_grad_f
required_function_t< void(rvec y, real_t M) const > eval_proj_multipliers
optional_function_t< Sparsity() const > get_hess_L_sparsity
required_function_t< real_t(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const > eval_prox_grad_step
optional_function_t< void(crvec x, index_t i, rvec grad_gi) const > eval_grad_gi
static void default_eval_hess_L_prod(const void *, crvec, crvec, real_t, crvec, rvec, const ProblemVTable &)
required_function_t< real_t(crvec x) const > eval_f
static void default_eval_hess_ψ_prod(const void *self, crvec x, crvec y, crvec, real_t scale, crvec v, rvec Hv, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const > eval_grad_ψ
static void default_eval_jac_g(const void *, crvec, rvec, const ProblemVTable &)
optional_function_t< void(crvec x, rvec J_values) const > eval_jac_g
required_function_t< void(crvec z, rvec e) const > eval_proj_diff_g
optional_function_t< real_t(crvec x, rvec g) const > eval_f_g
optional_function_t< Sparsity() const > get_jac_g_sparsity
optional_function_t< real_t(crvec x, crvec y, crvec Σ, rvec ŷ) const > eval_ψ
static void default_eval_grad_gi(const void *, crvec, index_t, rvec, const ProblemVTable &)
required_function_t< void(crvec x, rvec gx) const > eval_g
util::BasicVTable::optional_function_t< F, ProblemVTable > optional_function_t
static void default_eval_grad_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, rvec grad_L, rvec work_n) const > eval_grad_L
static const Box & default_get_box_C(const void *, const ProblemVTable &)
alpaqa::Sparsity< config_t > Sparsity
static void default_eval_grad_L(const void *self, crvec x, crvec y, rvec grad_L, rvec work_n, const ProblemVTable &vtable)
optional_function_t< const Box &() const > get_box_D
static const Box & default_get_box_D(const void *, const ProblemVTable &)
optional_function_t< void(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const > eval_hess_L_prod
static Sparsity default_get_jac_g_sparsity(const void *, const ProblemVTable &)
optional_function_t< real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const > eval_ψ_grad_ψ
optional_function_t< void(crvec x, crvec y, real_t scale, rvec H_values) const > eval_hess_L
static void default_eval_hess_ψ(const void *self, crvec x, crvec y, crvec, real_t scale, rvec H_values, const ProblemVTable &vtable)
static real_t default_eval_f_g(const void *self, crvec x, rvec g, const ProblemVTable &vtable)
required_function_t< void(crvec x, crvec y, rvec grad_gxy) const > eval_grad_g_prod
static index_t default_eval_inactive_indices_res_lna(const void *, real_t, crvec, crvec, rindexvec, const ProblemVTable &)
static void default_check(const void *, const ProblemVTable &)
optional_function_t< real_t(crvec x, rvec grad_fx) const > eval_f_grad_f
static void default_eval_hess_L(const void *, crvec, crvec, real_t, rvec, const ProblemVTable &)
optional_function_t< void(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const > eval_grad_f_grad_g_prod
static real_t calc_ŷ_dᵀŷ(const void *self, rvec g_ŷ, crvec y, crvec Σ, const ProblemVTable &vtable)
static void default_eval_grad_f_grad_g_prod(const void *self, crvec x, crvec y, rvec grad_f, rvec grad_gxy, const ProblemVTable &vtable)
static real_t default_eval_f_grad_f(const void *self, crvec x, rvec grad_fx, const ProblemVTable &vtable)
static Sparsity default_get_hess_L_sparsity(const void *, const ProblemVTable &)
static Sparsity default_get_hess_ψ_sparsity(const void *, const ProblemVTable &)
optional_function_t< index_t(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const > eval_inactive_indices_res_lna
optional_function_t< void(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const > eval_hess_ψ
static real_t default_eval_ψ_grad_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const > eval_hess_ψ_prod
optional_function_t< const Box &() const > get_box_C
Stores any of the supported sparsity patterns.
Definition sparsity.hpp:106
Struct that stores the size of a polymorphic object, as well as pointers to functions to copy,...
typename optional_function< F, VTable >::type optional_function_t
An optional function includes a void pointer to self, the arguments of F, and an additional reference...
typename required_function< F >::type required_function_t
A required function includes a void pointer to self, in addition to the arguments of F.