alpaqa 1.0.0a18
Nonconvex constrained optimization
Loading...
Searching...
No Matches
type-erased-problem.hpp
Go to the documentation of this file.
1#pragma once
2
4#include <alpaqa/export.hpp>
12#include <chrono>
13#include <stdexcept>
14#include <type_traits>
15#include <utility>
16
17namespace alpaqa {
18
19/// Struct containing function pointers to all problem functions (like the
20/// objective and constraint functions, with their derivatives, and more).
21/// Some default implementations are available.
22/// Internal struct, it is used by @ref TypeErasedProblem.
23template <Config Conf>
28
29 template <class F>
31
32 // clang-format off
33
34 // Required
39 required_function_t<real_t(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const>
51
52 // Second order
57 optional_function_t<void(crvec x, index_t i, rvec grad_gi) const>
71
72 // Combined evaluations
79
80 // Lagrangian and augmented lagrangian evaluations
81 optional_function_t<void(crvec x, crvec y, rvec grad_L, rvec work_n) const>
85 optional_function_t<void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const>
87 optional_function_t<real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const>
89
90 // Constraint sets
95
96 // Check
99 optional_function_t<std::string() const>
101
102 // clang-format on
103
105 const ProblemVTable &vtable);
120 const ProblemVTable &vtable);
123 const ProblemVTable &vtable);
126 const ProblemVTable &vtable);
128 const ProblemVTable &vtable);
130 rvec grad_f, rvec grad_gxy,
131 const ProblemVTable &vtable);
132 ALPAQA_EXPORT static void default_eval_grad_L(const void *self, crvec x, crvec y, rvec grad_L,
133 rvec work_n, const ProblemVTable &vtable);
135 const ProblemVTable &vtable);
137 rvec grad_ψ, rvec work_n, rvec work_m,
138 const ProblemVTable &vtable);
140 rvec grad_ψ, rvec work_n, rvec work_m,
141 const ProblemVTable &vtable);
146
148
149 template <class P>
150 ProblemVTable(std::in_place_t, P &p) : util::BasicVTable{std::in_place, p} {
151 auto &vtable = *this;
152
153 // Initialize all methods
154
155 // Required
164 // Second order
174 // Combined evaluations
176 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, eval_f_g, p);
178 // Lagrangian and augmented lagrangian evaluations
180 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, eval_ψ, p);
183 // Constraint set
186 // Check
187 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, check, p);
188 ALPAQA_TE_OPTIONAL_METHOD(vtable, P, get_name, p);
189
190 // Dimensions
191 vtable.n = p.get_n();
192 vtable.m = p.get_m();
193 }
194 ProblemVTable() = default;
195};
196
197// clang-format off
202// clang-format on
203
204/// @addtogroup grp_Problems
205/// @{
206
207/// The main polymorphic minimization problem interface.
208///
209/// This class wraps the actual problem implementation class, filling in the
210/// missing member functions with sensible defaults, and providing a uniform
211/// interface that is used by the solvers.
212///
213/// The problem implementations do not inherit from an abstract base class.
214/// Instead, [structural typing](https://en.wikipedia.org/wiki/Structural_type_system)
215/// is used. The @ref ProblemVTable constructor uses reflection to discover
216/// which member functions are provided by the problem implementation. See
217/// @ref page-problem-formulations for more information, and
218/// @ref C++/CustomCppProblem/main.cpp for an example.
219template <Config Conf = DefaultConfig, class Allocator = std::allocator<std::byte>>
221 public:
228 using TypeErased::TypeErased;
229
230 protected:
231 using TypeErased::call;
232 using TypeErased::self;
233 using TypeErased::vtable;
234
235 public:
236 template <class T, class... Args>
238 return TypeErased::template make<TypeErasedProblem, T>(std::forward<Args>(args)...);
239 }
240
241 /// @name Problem dimensions
242 /// @{
243
244 /// **[Required]**
245 /// Number of decision variables.
247 /// **[Required]**
248 /// Number of constraints.
250
251 /// @}
252
253 /// @name Required cost and constraint functions
254 /// @{
255
256 /// **[Required]**
257 /// Function that evaluates the cost, @f$ f(x) @f$
258 /// @param [in] x
259 /// Decision variable @f$ x \in \R^n @f$
261 /// **[Required]**
262 /// Function that evaluates the gradient of the cost, @f$ \nabla f(x) @f$
263 /// @param [in] x
264 /// Decision variable @f$ x \in \R^n @f$
265 /// @param [out] grad_fx
266 /// Gradient of cost function @f$ \nabla f(x) \in \R^n @f$
268 /// **[Required]**
269 /// Function that evaluates the constraints, @f$ g(x) @f$
270 /// @param [in] x
271 /// Decision variable @f$ x \in \R^n @f$
272 /// @param [out] gx
273 /// Value of the constraints @f$ g(x) \in \R^m @f$
274 void eval_g(crvec x, rvec gx) const;
275 /// **[Required]**
276 /// Function that evaluates the gradient of the constraints times a vector,
277 /// @f$ \nabla g(x)\,y = \tp{\jac_g(x)}y @f$
278 /// @param [in] x
279 /// Decision variable @f$ x \in \R^n @f$
280 /// @param [in] y
281 /// Vector @f$ y \in \R^m @f$ to multiply the gradient by
282 /// @param [out] grad_gxy
283 /// Gradient of the constraints
284 /// @f$ \nabla g(x)\,y \in \R^n @f$
286
287 /// @}
288
289 /// @name Projections onto constraint sets and proximal mappings
290 /// @{
291
292 /// **[Required]**
293 /// Function that evaluates the difference between the given point @f$ z @f$
294 /// and its projection onto the constraint set @f$ D @f$.
295 /// @param [in] z
296 /// Slack variable, @f$ z \in \R^m @f$
297 /// @param [out] e
298 /// The difference relative to its projection,
299 /// @f$ e = z - \Pi_D(z) \in \R^m @f$
300 /// @note @p z and @p e can refer to the same vector.
301 void eval_proj_diff_g(crvec z, rvec e) const;
302 /// **[Required]**
303 /// Function that projects the Lagrange multipliers for ALM.
304 /// @param [inout] y
305 /// Multipliers, @f$ y \leftarrow \Pi_Y(y) \in \R^m @f$
306 /// @param [in] M
307 /// The radius/size of the set @f$ Y @f$.
308 /// See @ref ALMParams::max_multiplier.
310 /// **[Required]**
311 /// Function that computes a proximal gradient step.
312 /// @param [in] γ
313 /// Step size, @f$ \gamma \in \R_{>0} @f$
314 /// @param [in] x
315 /// Decision variable @f$ x \in \R^n @f$
316 /// @param [in] grad_ψ
317 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
318 /// @param [out] x̂
319 /// Next proximal gradient iterate, @f$ \hat x = T_\gamma(x) =
320 /// \prox_{\gamma h}(x - \gamma\nabla\psi(x)) \in \R^n @f$
321 /// @param [out] p
322 /// The proximal gradient step,
323 /// @f$ p = \hat x - x \in \R^n @f$
324 /// @return The nonsmooth function evaluated at x̂,
325 /// @f$ h(\hat x) @f$.
326 /// @note The vector @f$ p @f$ is often used in stopping criteria, so its
327 /// numerical accuracy is more important than that of @f$ \hat x @f$.
328 real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const;
329 /// **[Optional]**
330 /// Function that computes the inactive indices @f$ \mathcal J(x) @f$ for
331 /// the evaluation of the linear Newton approximation of the residual, as in
332 /// @cite pas2022alpaqa.
333 /// @param [in] γ
334 /// Step size, @f$ \gamma \in \R_{>0} @f$
335 /// @param [in] x
336 /// Decision variable @f$ x \in \R^n @f$
337 /// @param [in] grad_ψ
338 /// Gradient of the subproblem cost, @f$ \nabla\psi(x) \in \R^n @f$
339 /// @param [out] J
340 /// The indices of the components of @f$ x @f$ that are in the
341 /// index set @f$ \mathcal J(x) @f$. In ascending order, at most n.
342 /// @return The number of inactive constraints, @f$ \# \mathcal J(x) @f$.
343 ///
344 /// For example, in the case of box constraints, we have
345 /// @f[ \mathcal J(x) \defeq \defset{i \in \N_{[0, n-1]}}{\underline x_i
346 /// \lt x_i - \gamma\nabla_{\!x_i}\psi(x) \lt \overline x_i}. @f]
348 rindexvec J) const;
349
350 /// @}
351
352 /// @name Constraint sets
353 /// @{
354
355 /// **[Optional]**
356 /// Get the rectangular constraint set of the decision variables,
357 /// @f$ x \in C @f$.
358 [[nodiscard]] const Box &get_box_C() const;
359 /// **[Optional]**
360 /// Get the rectangular constraint set of the general constraint function,
361 /// @f$ g(x) \in D @f$.
362 [[nodiscard]] const Box &get_box_D() const;
363
364 /// @}
365
366 /// @name Functions for second-order solvers
367 /// @{
368
369 /// **[Optional]**
370 /// Function that evaluates the nonzero values of the Jacobian matrix of the
371 /// constraints, @f$ \jac_g(x) @f$
372 /// @param [in] x
373 /// Decision variable @f$ x \in \R^n @f$
374 /// @param [out] J_values
375 /// Nonzero values of the Jacobian
376 /// @f$ \jac_g(x) \in \R^{m\times n} @f$
377 ///
378 /// Required for second-order solvers only.
380 /// **[Optional]**
381 /// Function that returns (a view of) the sparsity pattern of the Jacobian
382 /// of the constraints.
383 ///
384 /// Required for second-order solvers only.
386 /// **[Optional]**
387 /// Function that evaluates the gradient of one specific constraint,
388 /// @f$ \nabla g_i(x) @f$
389 /// @param [in] x
390 /// Decision variable @f$ x \in \R^n @f$
391 /// @param [in] i
392 /// Which constraint @f$ 0 \le i \lt m @f$
393 /// @param [out] grad_gi
394 /// Gradient of the constraint
395 /// @f$ \nabla g_i(x) \in \R^n @f$
396 ///
397 /// Required for second-order solvers only.
398 void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const;
399 /// **[Optional]**
400 /// Function that evaluates the Hessian of the Lagrangian multiplied by a
401 /// vector,
402 /// @f$ \nabla_{xx}^2L(x, y)\,v @f$
403 /// @param [in] x
404 /// Decision variable @f$ x \in \R^n @f$
405 /// @param [in] y
406 /// Lagrange multipliers @f$ y \in \R^m @f$
407 /// @param [in] scale
408 /// Scale factor for the cost function.
409 /// @param [in] v
410 /// Vector to multiply by @f$ v \in \R^n @f$
411 /// @param [out] Hv
412 /// Hessian-vector product
413 /// @f$ \nabla_{xx}^2 L(x, y)\,v \in \R^{n} @f$
414 ///
415 /// Required for second-order solvers only.
417 /// **[Optional]**
418 /// Function that evaluates the nonzero values of the Hessian of the
419 /// Lagrangian, @f$ \nabla_{xx}^2L(x, y) @f$
420 /// @param [in] x
421 /// Decision variable @f$ x \in \R^n @f$
422 /// @param [in] y
423 /// Lagrange multipliers @f$ y \in \R^m @f$
424 /// @param [in] scale
425 /// Scale factor for the cost function.
426 /// @param [out] H_values
427 /// Nonzero values of the Hessian
428 /// @f$ \nabla_{xx}^2 L(x, y) \in \R^{n\times n} @f$.
429 ///
430 /// Required for second-order solvers only.
432 /// **[Optional]**
433 /// Function that returns (a view of) the sparsity pattern of the Hessian of
434 /// the Lagrangian.
435 ///
436 /// Required for second-order solvers only.
438 /// **[Optional]**
439 /// Function that evaluates the Hessian of the augmented Lagrangian
440 /// multiplied by a vector,
441 /// @f$ \nabla_{xx}^2L_\Sigma(x, y)\,v @f$
442 /// @param [in] x
443 /// Decision variable @f$ x \in \R^n @f$
444 /// @param [in] y
445 /// Lagrange multipliers @f$ y \in \R^m @f$
446 /// @param [in] Σ
447 /// Penalty weights @f$ \Sigma @f$
448 /// @param [in] scale
449 /// Scale factor for the cost function.
450 /// @param [in] v
451 /// Vector to multiply by @f$ v \in \R^n @f$
452 /// @param [out] Hv
453 /// Hessian-vector product
454 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y)\,v \in \R^{n} @f$
455 ///
456 /// Required for second-order solvers only.
458 /// **[Optional]**
459 /// Function that evaluates the nonzero values of the Hessian of the
460 /// augmented Lagrangian, @f$ \nabla_{xx}^2L_\Sigma(x, y) @f$
461 /// @param [in] x
462 /// Decision variable @f$ x \in \R^n @f$
463 /// @param [in] y
464 /// Lagrange multipliers @f$ y \in \R^m @f$
465 /// @param [in] Σ
466 /// Penalty weights @f$ \Sigma @f$
467 /// @param [in] scale
468 /// Scale factor for the cost function.
469 /// @param [out] H_values
470 /// Nonzero values of the Hessian
471 /// @f$ \nabla_{xx}^2 L_\Sigma(x, y) \in \R^{n\times n} @f$
472 ///
473 /// Required for second-order solvers only.
475 /// **[Optional]**
476 /// Function that returns (a view of) the sparsity pattern of the Hessian of
477 /// the augmented Lagrangian.
478 ///
479 /// Required for second-order solvers only.
481
482 /// @}
483
484 /// @name Combined evaluations
485 /// @{
486
487 /// **[Optional]**
488 /// Evaluate both @f$ f(x) @f$ and its gradient, @f$ \nabla f(x) @f$.
489 /// @default_impl ProblemVTable::default_eval_f_grad_f
491 /// **[Optional]**
492 /// Evaluate both @f$ f(x) @f$ and @f$ g(x) @f$.
493 /// @default_impl ProblemVTable::default_eval_f_g
495 /// **[Optional]**
496 /// Evaluate both @f$ \nabla f(x) @f$ and @f$ \nabla g(x)\,y @f$.
497 /// @default_impl ProblemVTable::default_eval_grad_f_grad_g_prod
499 /// **[Optional]**
500 /// Evaluate the gradient of the Lagrangian
501 /// @f$ \nabla_x L(x, y) = \nabla f(x) + \nabla g(x)\,y @f$
502 /// @default_impl ProblemVTable::default_eval_grad_L
503 void eval_grad_L(crvec x, crvec y, rvec grad_L, rvec work_n) const;
504
505 /// @}
506
507 /// @name Augmented Lagrangian
508 /// @{
509
510 /// **[Optional]**
511 /// Calculate both ψ(x) and the vector ŷ that can later be used to compute
512 /// ∇ψ.
513 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
514 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
515 /// @f[ \hat y = \Sigma\, \left(g(x) + \Sigma^{-1}y - \Pi_D\left(g(x)
516 /// + \Sigma^{-1}y\right)\right) @f]
517 /// @default_impl ProblemVTable::default_eval_ψ
518 [[nodiscard]] real_t eval_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
519 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
520 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
521 rvec ŷ ///< [out] @f$ \hat y @f$
522 ) const;
523 /// **[Optional]**
524 /// Calculate the gradient ∇ψ(x).
525 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
526 /// @default_impl ProblemVTable::default_eval_grad_ψ
527 void eval_grad_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
528 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
529 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
530 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
531 rvec work_n, ///< Dimension @f$ n @f$
532 rvec work_m ///< Dimension @f$ m @f$
533 ) const;
534 /// **[Optional]**
535 /// Calculate both ψ(x) and its gradient ∇ψ(x).
536 /// @f[ \psi(x) = f(x) + \tfrac{1}{2}
537 /// \text{dist}_\Sigma^2\left(g(x) + \Sigma^{-1}y,\;D\right) @f]
538 /// @f[ \nabla \psi(x) = \nabla f(x) + \nabla g(x)\,\hat y(x) @f]
539 /// @default_impl ProblemVTable::default_eval_ψ_grad_ψ
540 [[nodiscard]] real_t eval_ψ_grad_ψ(crvec x, ///< [in] Decision variable @f$ x @f$
541 crvec y, ///< [in] Lagrange multipliers @f$ y @f$
542 crvec Σ, ///< [in] Penalty weights @f$ \Sigma @f$
543 rvec grad_ψ, ///< [out] @f$ \nabla \psi(x) @f$
544 rvec work_n, ///< Dimension @f$ n @f$
545 rvec work_m ///< Dimension @f$ m @f$
546 ) const;
547
548 /// @}
549
550 /// @name Checks
551 /// @{
552
553 /// **[Optional]**
554 /// Check that the problem formulation is well-defined, the dimensions match,
555 /// etc. Throws an exception if this is not the case.
556 void check() const;
557
558 /// @}
559
560 /// @name Metadata
561 /// @{
562
563 /// **[Optional]**
564 /// Get a descriptive name for the problem.
565 [[nodiscard]] std::string get_name() const;
566
567 /// @}
568
569 /// @name Querying specialized implementations
570 /// @{
571
572 /// Returns true if the problem provides an implementation of
573 /// @ref eval_inactive_indices_res_lna.
575 return vtable.eval_inactive_indices_res_lna != vtable.default_eval_inactive_indices_res_lna;
576 }
577 /// Returns true if the problem provides an implementation of
578 /// @ref eval_jac_g.
580 return vtable.eval_jac_g != vtable.default_eval_jac_g;
581 }
582 /// Returns true if the problem provides an implementation of
583 /// @ref get_jac_g_sparsity.
585 return vtable.get_jac_g_sparsity != vtable.default_get_jac_g_sparsity;
586 }
587 /// Returns true if the problem provides an implementation of
588 /// @ref eval_grad_gi.
590 return vtable.eval_grad_gi != vtable.default_eval_grad_gi;
591 }
592 /// Returns true if the problem provides an implementation of
593 /// @ref eval_hess_L_prod.
595 return vtable.eval_hess_L_prod != vtable.default_eval_hess_L_prod;
596 }
597 /// Returns true if the problem provides an implementation of
598 /// @ref eval_hess_L.
600 return vtable.eval_hess_L != vtable.default_eval_hess_L;
601 }
602 /// Returns true if the problem provides an implementation of
603 /// @ref get_hess_L_sparsity.
605 return vtable.get_hess_L_sparsity != vtable.default_get_hess_L_sparsity;
606 }
607 /// Returns true if the problem provides an implementation of
608 /// @ref eval_hess_ψ_prod.
610 return vtable.eval_hess_ψ_prod != vtable.default_eval_hess_ψ_prod;
611 }
612 /// Returns true if the problem provides an implementation of
613 /// @ref eval_hess_ψ.
615 return vtable.eval_hess_ψ != vtable.default_eval_hess_ψ;
616 }
617 /// Returns true if the problem provides an implementation of
618 /// @ref get_hess_ψ_sparsity.
620 return vtable.get_hess_ψ_sparsity != vtable.default_get_hess_ψ_sparsity;
621 }
622 /// Returns true if the problem provides a specialized implementation of
623 /// @ref eval_f_grad_f, false if it uses the default implementation.
625 return vtable.eval_f_grad_f != vtable.default_eval_f_grad_f;
626 }
627 /// Returns true if the problem provides a specialized implementation of
628 /// @ref eval_f_g, false if it uses the default implementation.
629 [[nodiscard]] bool provides_eval_f_g() const {
630 return vtable.eval_f_g != vtable.default_eval_f_g;
631 }
632 /// Returns true if the problem provides a specialized implementation of
633 /// @ref eval_grad_f_grad_g_prod, false if it uses the default implementation.
635 return vtable.eval_grad_f_grad_g_prod != vtable.default_eval_grad_f_grad_g_prod;
636 }
637 /// Returns true if the problem provides a specialized implementation of
638 /// @ref eval_grad_L, false if it uses the default implementation.
640 return vtable.eval_grad_L != vtable.default_eval_grad_L;
641 }
642 /// Returns true if the problem provides a specialized implementation of
643 /// @ref eval_ψ, false if it uses the default implementation.
644 [[nodiscard]] bool provides_eval_ψ() const { return vtable.eval_ψ != vtable.default_eval_ψ; }
645 /// Returns true if the problem provides a specialized implementation of
646 /// @ref eval_grad_ψ, false if it uses the default implementation.
648 return vtable.eval_grad_ψ != vtable.default_eval_grad_ψ;
649 }
650 /// Returns true if the problem provides a specialized implementation of
651 /// @ref eval_ψ_grad_ψ, false if it uses the default implementation.
653 return vtable.eval_ψ_grad_ψ != vtable.default_eval_ψ_grad_ψ;
654 }
655 /// Returns true if the problem provides an implementation of
656 /// @ref get_box_C.
657 [[nodiscard]] bool provides_get_box_C() const {
658 return vtable.get_box_C != vtable.default_get_box_C;
659 }
660 /// Returns true if the problem provides an implementation of
661 /// @ref get_box_D.
662 [[nodiscard]] bool provides_get_box_D() const {
663 return vtable.get_box_D != vtable.default_get_box_D;
664 }
665 /// Returns true if the problem provides an implementation of @ref check.
666 [[nodiscard]] bool provides_check() const { return vtable.check != vtable.default_check; }
667 /// Returns true if the problem provides an implementation of @ref get_name.
668 [[nodiscard]] bool provides_get_name() const {
669 return vtable.get_name != vtable.default_get_name;
670 }
671
672 /// @}
673
674 /// @name Querying available functions
675 /// @{
676
677 /// Returns true if @ref eval_hess_ψ_prod can be called.
679 return provides_eval_hess_ψ_prod() || (vtable.m == 0 && provides_eval_hess_L_prod());
680 }
681 /// Returns true if @ref eval_hess_ψ can be called.
683 return provides_eval_hess_ψ() || (vtable.m == 0 && provides_eval_hess_L());
684 }
685
686 /// @}
687
688 /// @name Helpers
689 /// @{
690
691 /// Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be
692 /// used to compute ψ(x) and ∇ψ(x).
693 ///
694 /// Computes the result using the following algorithm:
695 /// @f[ \begin{aligned}
696 /// \zeta &= g(x) + \Sigma^{-1} y \\[]
697 /// d &= \zeta - \Pi_D(\zeta)
698 /// = \operatorname{eval\_proj\_diff\_g}(\zeta, \zeta) \\[]
699 /// \hat y &= \Sigma d \\[]
700 /// \end{aligned} @f]
701 /// @see @ref page_math
702 ///
703 /// @param[inout] g_ŷ
704 /// Input @f$ g(x) @f$, outputs @f$ \hat y @f$
705 /// @param[in] y
706 /// Lagrange multipliers @f$ y @f$
707 /// @param[in] Σ
708 /// Penalty weights @f$ \Sigma @f$
709 /// @return The inner product @f$ d^\top \hat y @f$
711
712 /// @}
713};
714
715/// @}
716
717#ifndef DOXYGEN
718template <class Tref>
719explicit TypeErasedProblem(Tref &&d)
721
722template <class Tref, class Allocator>
725#endif
726
727template <Config Conf, class Allocator>
729 return vtable.n;
730}
731template <Config Conf, class Allocator>
733 return vtable.m;
734}
735
736template <Config Conf, class Allocator>
738 return call(vtable.eval_proj_diff_g, z, e);
739}
740template <Config Conf, class Allocator>
742 return call(vtable.eval_proj_multipliers, y, M);
743}
744template <Config Conf, class Allocator>
746 rvec x̂, rvec p) const -> real_t {
747 return call(vtable.eval_prox_grad_step, γ, x, grad_ψ, x̂, p);
748}
749template <Config Conf, class Allocator>
751 crvec grad_ψ,
752 rindexvec J) const
753 -> index_t {
754 return call(vtable.eval_inactive_indices_res_lna, γ, x, grad_ψ, J);
755}
756template <Config Conf, class Allocator>
758 return call(vtable.eval_f, x);
759}
760template <Config Conf, class Allocator>
762 return call(vtable.eval_grad_f, x, grad_fx);
763}
764template <Config Conf, class Allocator>
766 return call(vtable.eval_g, x, gx);
767}
768template <Config Conf, class Allocator>
770 return call(vtable.eval_grad_g_prod, x, y, grad_gxy);
771}
772template <Config Conf, class Allocator>
774 return call(vtable.eval_grad_gi, x, i, grad_gi);
775}
776template <Config Conf, class Allocator>
778 return call(vtable.eval_jac_g, x, J_values);
779}
780template <Config Conf, class Allocator>
782 return call(vtable.get_jac_g_sparsity);
783}
784template <Config Conf, class Allocator>
786 rvec Hv) const {
787 return call(vtable.eval_hess_L_prod, x, y, scale, v, Hv);
788}
789template <Config Conf, class Allocator>
791 rvec H_values) const {
792 return call(vtable.eval_hess_L, x, y, scale, H_values);
793}
794template <Config Conf, class Allocator>
796 return call(vtable.get_hess_L_sparsity);
797}
798template <Config Conf, class Allocator>
800 crvec v, rvec Hv) const {
801 return call(vtable.eval_hess_ψ_prod, x, y, Σ, scale, v, Hv);
802}
803template <Config Conf, class Allocator>
805 rvec H_values) const {
806 return call(vtable.eval_hess_ψ, x, y, Σ, scale, H_values);
807}
808template <Config Conf, class Allocator>
810 return call(vtable.get_hess_ψ_sparsity);
811}
812template <Config Conf, class Allocator>
814 return call(vtable.eval_f_grad_f, x, grad_fx);
815}
816template <Config Conf, class Allocator>
818 return call(vtable.eval_f_g, x, g);
819}
820template <Config Conf, class Allocator>
822 rvec grad_gxy) const {
823 return call(vtable.eval_grad_f_grad_g_prod, x, y, grad_f, grad_gxy);
824}
825template <Config Conf, class Allocator>
827 rvec work_n) const {
828 return call(vtable.eval_grad_L, x, y, grad_L, work_n);
829}
830template <Config Conf, class Allocator>
832 return call(vtable.eval_ψ, x, y, Σ, ŷ);
833}
834template <Config Conf, class Allocator>
836 rvec work_n, rvec work_m) const {
837 return call(vtable.eval_grad_ψ, x, y, Σ, grad_ψ, work_n, work_m);
838}
839template <Config Conf, class Allocator>
841 rvec work_n, rvec work_m) const -> real_t {
842 return call(vtable.eval_ψ_grad_ψ, x, y, Σ, grad_ψ, work_n, work_m);
843}
844template <Config Conf, class Allocator>
846 return call(vtable.calc_ŷ_dᵀŷ, g_ŷ, y, Σ);
847}
848template <Config Conf, class Allocator>
850 return call(vtable.get_box_C);
851}
852template <Config Conf, class Allocator>
854 return call(vtable.get_box_D);
855}
856template <Config Conf, class Allocator>
858 return call(vtable.check);
859}
860template <Config Conf, class Allocator>
862 return call(vtable.get_name);
863}
864
865/// @addtogroup grp_Problems
866/// @{
867
868template <Config Conf>
869void print_provided_functions(std::ostream &os, const TypeErasedProblem<Conf> &problem) {
870 os << "inactive_indices_res_lna: " << problem.provides_eval_inactive_indices_res_lna() << '\n'
871 << " grad_gi: " << problem.provides_eval_grad_gi() << '\n'
872 << " jac_g: " << problem.provides_eval_jac_g() << '\n'
873 << " hess_L_prod: " << problem.provides_eval_hess_L_prod() << '\n'
874 << " hess_L: " << problem.provides_eval_hess_L() << '\n'
875 << " hess_ψ_prod: " << problem.provides_eval_hess_ψ_prod() << '\n'
876 << " hess_ψ: " << problem.provides_eval_hess_ψ() << '\n'
877 << " f_grad_f: " << problem.provides_eval_f_grad_f() << '\n'
878 << " f_g: " << problem.provides_eval_f_g() << '\n'
879 << " grad_f_grad_g_prod: " << problem.provides_eval_grad_f_grad_g_prod() << '\n'
880 << " grad_L: " << problem.provides_eval_grad_L() << '\n'
881 << " ψ: " << problem.provides_eval_ψ() << '\n'
882 << " grad_ψ: " << problem.provides_eval_grad_ψ() << '\n'
883 << " ψ_grad_ψ: " << problem.provides_eval_ψ_grad_ψ() << '\n'
884 << " get_box_C: " << problem.provides_get_box_C() << '\n'
885 << " get_box_D: " << problem.provides_get_box_D() << '\n'
886 << " check: " << problem.provides_check() << '\n'
887 << " get_name: " << problem.provides_get_name() << '\n';
888}
889
890/// @}
891
892} // namespace alpaqa
The main polymorphic minimization problem interface.
bool provides_eval_hess_L() const
Returns true if the problem provides an implementation of eval_hess_L.
real_t eval_prox_grad_step(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const
[Required] Function that computes a proximal gradient step.
std::string get_name() const
[Optional] Get a descriptive name for the problem.
real_t eval_ψ_grad_ψ(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const
[Optional] Calculate both ψ(x) and its gradient ∇ψ(x).
bool provides_get_hess_L_sparsity() const
Returns true if the problem provides an implementation of get_hess_L_sparsity.
const Box & get_box_D() const
[Optional] Get the rectangular constraint set of the general constraint function, g(x) ∈ D.
void eval_grad_gi(crvec x, index_t i, rvec grad_gi) const
[Optional] Function that evaluates the gradient of one specific constraint, ∇gᵢ(x)
bool provides_eval_hess_ψ_prod() const
Returns true if the problem provides an implementation of eval_hess_ψ_prod.
bool provides_eval_ψ_grad_ψ() const
Returns true if the problem provides a specialized implementation of eval_ψ_grad_ψ,...
bool provides_get_box_C() const
Returns true if the problem provides an implementation of get_box_C.
void eval_jac_g(crvec x, rvec J_values) const
[Optional] Function that evaluates the nonzero values of the Jacobian matrix of the constraints, jac_g(x)
Sparsity get_jac_g_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Jacobian of the constraints.
real_t eval_f_g(crvec x, rvec g) const
[Optional] Evaluate both f(x) and g(x).
Sparsity get_hess_ψ_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Hessian of the augmented Lag...
bool provides_eval_jac_g() const
Returns true if the problem provides an implementation of eval_jac_g.
bool provides_check() const
Returns true if the problem provides an implementation of check.
length_t get_n() const
[Required] Number of decision variables.
void eval_hess_ψ(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const
[Optional] Function that evaluates the nonzero values of the Hessian of the augmented Lagrangian, ∇²ₓₓL_Σ(x, y)
Sparsity get_hess_L_sparsity() const
[Optional] Function that returns (a view of) the sparsity pattern of the Hessian of the Lagrangian.
void check() const
[Optional] Check that the problem formulation is well-defined, the dimensions match,...
length_t get_m() const
[Required] Number of constraints.
real_t eval_ψ(crvec x, crvec y, crvec Σ, rvec ŷ) const
[Optional] Calculate both ψ(x) and the vector ŷ that can later be used to compute ∇ψ.
bool provides_eval_inactive_indices_res_lna() const
Returns true if the problem provides an implementation of eval_inactive_indices_res_lna.
bool provides_get_name() const
Returns true if the problem provides an implementation of get_name.
void eval_grad_L(crvec x, crvec y, rvec grad_L, rvec work_n) const
[Optional] Evaluate the gradient of the Lagrangian ∇ₓL(x, y) = ∇f(x) + ∇g(x) y
void eval_grad_f_grad_g_prod(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const
[Optional] Evaluate both ∇f(x) and ∇g(x) y.
bool provides_eval_grad_f_grad_g_prod() const
Returns true if the problem provides a specialized implementation of eval_grad_f_grad_g_prod,...
static TypeErasedProblem make(Args &&...args)
index_t eval_inactive_indices_res_lna(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const
[Optional] Function that computes the inactive indices for the evaluation of the linear Newton appro...
bool provides_get_hess_ψ_sparsity() const
Returns true if the problem provides an implementation of get_hess_ψ_sparsity.
bool provides_eval_hess_L_prod() const
Returns true if the problem provides an implementation of eval_hess_L_prod.
bool supports_eval_hess_ψ_prod() const
Returns true if eval_hess_ψ_prod can be called.
bool provides_get_jac_g_sparsity() const
Returns true if the problem provides an implementation of get_jac_g_sparsity.
real_t eval_f_grad_f(crvec x, rvec grad_fx) const
[Optional] Evaluate both f(x) and its gradient, ∇f(x).
bool provides_eval_f_grad_f() const
Returns true if the problem provides a specialized implementation of eval_f_grad_f,...
bool supports_eval_hess_ψ() const
Returns true if eval_hess_ψ can be called.
void eval_grad_g_prod(crvec x, crvec y, rvec grad_gxy) const
[Required] Function that evaluates the gradient of the constraints times a vector, ∇g(x) y
void eval_hess_L_prod(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const
[Optional] Function that evaluates the Hessian of the Lagrangian multiplied by a vector, ∇²ₓₓL(x, y) v
bool provides_eval_grad_gi() const
Returns true if the problem provides an implementation of eval_grad_gi.
void eval_proj_multipliers(rvec y, real_t M) const
[Required] Function that projects the Lagrange multipliers for ALM.
bool provides_eval_f_g() const
Returns true if the problem provides a specialized implementation of eval_f_g, false if it uses the d...
void eval_grad_f(crvec x, rvec grad_fx) const
[Required] Function that evaluates the gradient of the cost, ∇f(x)
real_t eval_f(crvec x) const
[Required] Function that evaluates the cost, f(x)
bool provides_eval_grad_L() const
Returns true if the problem provides a specialized implementation of eval_grad_L, false if it uses th...
bool provides_eval_grad_ψ() const
Returns true if the problem provides a specialized implementation of eval_grad_ψ, false if it uses th...
void eval_g(crvec x, rvec gx) const
[Required] Function that evaluates the constraints, g(x)
void eval_hess_L(crvec x, crvec y, real_t scale, rvec H_values) const
[Optional] Function that evaluates the nonzero values of the Hessian of the Lagrangian, ∇²ₓₓL(x, y)
bool provides_eval_hess_ψ() const
Returns true if the problem provides an implementation of eval_hess_ψ.
real_t calc_ŷ_dᵀŷ(rvec g_ŷ, crvec y, crvec Σ) const
Given g(x), compute the intermediate results ŷ and dᵀŷ that can later be used to compute ψ(x) and ∇ψ(...
bool provides_get_box_D() const
Returns true if the problem provides an implementation of get_box_D.
const Box & get_box_C() const
[Optional] Get the rectangular constraint set of the decision variables, x ∈ C.
void eval_proj_diff_g(crvec z, rvec e) const
[Required] Function that evaluates the difference between the given point and its projection onto th...
void eval_grad_ψ(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const
[Optional] Calculate the gradient ∇ψ(x).
bool provides_eval_ψ() const
Returns true if the problem provides a specialized implementation of eval_ψ, false if it uses the def...
void eval_hess_ψ_prod(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const
[Optional] Function that evaluates the Hessian of the augmented Lagrangian multiplied by a vector, ∇²ₓₓψ(x) v.
Class for polymorphism through type erasure.
#define USING_ALPAQA_CONFIG(Conf)
Definition config.hpp:77
#define ALPAQA_IF_QUADF(...)
Definition config.hpp:221
#define ALPAQA_IF_LONGD(...)
Definition config.hpp:233
#define ALPAQA_IF_FLOAT(...)
Definition config.hpp:227
#define ALPAQA_EXPORT_EXTERN_TEMPLATE(...)
Definition export.hpp:21
void print_provided_functions(std::ostream &os, const TypeErasedProblem< Conf > &problem)
typename Conf::real_t real_t
Definition config.hpp:86
typename Conf::rindexvec rindexvec
Definition config.hpp:106
typename Conf::index_t index_t
Definition config.hpp:104
typename Conf::length_t length_t
Definition config.hpp:103
constexpr const auto inf
Definition config.hpp:112
typename Conf::rvec rvec
Definition config.hpp:91
typename Conf::crvec crvec
Definition config.hpp:92
#define ALPAQA_TE_OPTIONAL_METHOD(vtable, type, member, instance)
#define ALPAQA_TE_REQUIRED_METHOD(vtable, type, member)
Double-precision double configuration.
Definition config.hpp:174
Single-precision float configuration.
Definition config.hpp:170
long double configuration.
Definition config.hpp:179
Struct containing function pointers to all problem functions (like the objective and constraint funct...
optional_function_t< void() const > check
static std::string default_get_name(const void *, const ProblemVTable &)
optional_function_t< Sparsity() const > get_hess_ψ_sparsity
static real_t default_eval_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec ŷ, const ProblemVTable &vtable)
required_function_t< void(crvec x, rvec grad_fx) const > eval_grad_f
required_function_t< void(rvec y, real_t M) const > eval_proj_multipliers
optional_function_t< Sparsity() const > get_hess_L_sparsity
required_function_t< real_t(real_t γ, crvec x, crvec grad_ψ, rvec x̂, rvec p) const > eval_prox_grad_step
optional_function_t< void(crvec x, index_t i, rvec grad_gi) const > eval_grad_gi
static void default_eval_hess_L_prod(const void *, crvec, crvec, real_t, crvec, rvec, const ProblemVTable &)
required_function_t< real_t(crvec x) const > eval_f
static void default_eval_hess_ψ_prod(const void *self, crvec x, crvec y, crvec, real_t scale, crvec v, rvec Hv, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const > eval_grad_ψ
static void default_eval_jac_g(const void *, crvec, rvec, const ProblemVTable &)
optional_function_t< void(crvec x, rvec J_values) const > eval_jac_g
required_function_t< void(crvec z, rvec e) const > eval_proj_diff_g
optional_function_t< real_t(crvec x, rvec g) const > eval_f_g
optional_function_t< Sparsity() const > get_jac_g_sparsity
optional_function_t< real_t(crvec x, crvec y, crvec Σ, rvec ŷ) const > eval_ψ
static void default_eval_grad_gi(const void *, crvec, index_t, rvec, const ProblemVTable &)
required_function_t< void(crvec x, rvec gx) const > eval_g
util::BasicVTable::optional_function_t< F, ProblemVTable > optional_function_t
static void default_eval_grad_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, rvec grad_L, rvec work_n) const > eval_grad_L
static const Box & default_get_box_C(const void *, const ProblemVTable &)
alpaqa::Sparsity< config_t > Sparsity
static void default_eval_grad_L(const void *self, crvec x, crvec y, rvec grad_L, rvec work_n, const ProblemVTable &vtable)
optional_function_t< const Box &() const > get_box_D
static const Box & default_get_box_D(const void *, const ProblemVTable &)
optional_function_t< void(crvec x, crvec y, real_t scale, crvec v, rvec Hv) const > eval_hess_L_prod
static Sparsity default_get_jac_g_sparsity(const void *, const ProblemVTable &)
optional_function_t< real_t(crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m) const > eval_ψ_grad_ψ
optional_function_t< void(crvec x, crvec y, real_t scale, rvec H_values) const > eval_hess_L
static void default_eval_hess_ψ(const void *self, crvec x, crvec y, crvec, real_t scale, rvec H_values, const ProblemVTable &vtable)
static real_t default_eval_f_g(const void *self, crvec x, rvec g, const ProblemVTable &vtable)
required_function_t< void(crvec x, crvec y, rvec grad_gxy) const > eval_grad_g_prod
static index_t default_eval_inactive_indices_res_lna(const void *, real_t, crvec, crvec, rindexvec, const ProblemVTable &)
static void default_check(const void *, const ProblemVTable &)
optional_function_t< real_t(crvec x, rvec grad_fx) const > eval_f_grad_f
static void default_eval_hess_L(const void *, crvec, crvec, real_t, rvec, const ProblemVTable &)
optional_function_t< void(crvec x, crvec y, rvec grad_f, rvec grad_gxy) const > eval_grad_f_grad_g_prod
static real_t calc_ŷ_dᵀŷ(const void *self, rvec g_ŷ, crvec y, crvec Σ, const ProblemVTable &vtable)
optional_function_t< std::string() const > get_name
static void default_eval_grad_f_grad_g_prod(const void *self, crvec x, crvec y, rvec grad_f, rvec grad_gxy, const ProblemVTable &vtable)
static real_t default_eval_f_grad_f(const void *self, crvec x, rvec grad_fx, const ProblemVTable &vtable)
static Sparsity default_get_hess_L_sparsity(const void *, const ProblemVTable &)
static Sparsity default_get_hess_ψ_sparsity(const void *, const ProblemVTable &)
optional_function_t< index_t(real_t γ, crvec x, crvec grad_ψ, rindexvec J) const > eval_inactive_indices_res_lna
optional_function_t< void(crvec x, crvec y, crvec Σ, real_t scale, rvec H_values) const > eval_hess_ψ
static real_t default_eval_ψ_grad_ψ(const void *self, crvec x, crvec y, crvec Σ, rvec grad_ψ, rvec work_n, rvec work_m, const ProblemVTable &vtable)
optional_function_t< void(crvec x, crvec y, crvec Σ, real_t scale, crvec v, rvec Hv) const > eval_hess_ψ_prod
optional_function_t< const Box &() const > get_box_C
Stores any of the supported sparsity patterns.
Definition sparsity.hpp:106
Struct that stores the size of a polymorphic object, as well as pointers to functions to copy,...
typename optional_function< F, VTable >::type optional_function_t
An optional function includes a void pointer to self, the arguments of F, and an additional reference...
typename required_function< F >::type required_function_t
A required function includes a void pointer to self, in addition to the arguments of F.