alpaqa 1.0.0a19
Nonconvex constrained optimization
CasADiControlProblem.tpp
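This listing implements alpaqa::CasADiControlProblem&lt;Conf&gt;: it loads pre-compiled CasADi functions (dynamics f, output map h, cost l, constraints c, and their derivatives) from a shared library, validates their dimensions, and evaluates them through the optimal-control problem interface. For orientation, a minimal usage sketch follows; the header path, configuration alias, file names, and horizon length are illustrative assumptions rather than part of this file.

#include <alpaqa/casadi/CasADiControlProblem.hpp> // assumed header location

int main() {
    // Load the compiled CasADi functions ("f", "h", "l", "Q", "R", ...) from a
    // shared library; the second argument is the horizon length N of the OCP.
    alpaqa::CasADiControlProblem<alpaqa::DefaultConfig> problem{
        "quadcopter_ocp.so", 32};
    // Optionally read bounds, initial state, and parameters from a CSV file
    // (same row layout as load_numerical_data below expects).
    problem.load_numerical_data("quadcopter_ocp.csv");
}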
#pragma once

#include "CasADiLoader-util.hpp"

#include <Eigen/Sparse>

#if ALPAQA_WITH_EXTERNAL_CASADI
#include <casadi/core/external.hpp>
#endif

#include <fstream>
#include <memory>
#include <optional>
#include <stdexcept>
#include <type_traits>

namespace alpaqa {

namespace fs = std::filesystem;

namespace casadi_loader {

using namespace alpaqa::casadi_loader;
template <Config Conf>
struct CasADiControlFunctionsWithParam {
    USING_ALPAQA_CONFIG(Conf);

    static constexpr bool WithParam = true;
    length_t nx, nu, nh, nh_N, nc, nc_N, p;

    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> f;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> jac_f;
    CasADiFunctionEvaluator<Conf, 3 + WithParam, 1> grad_f_prod;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> h;
    CasADiFunctionEvaluator<Conf, 1 + WithParam, 1> h_N;

    CasADiFunctionEvaluator<Conf, 1 + WithParam, 1> l;
    CasADiFunctionEvaluator<Conf, 1 + WithParam, 1> l_N;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> qr;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> q_N;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> Q;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> Q_N;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> R;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> S;

    CasADiFunctionEvaluator<Conf, 1 + WithParam, 1> c;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> grad_c_prod;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> gn_hess_c;
    CasADiFunctionEvaluator<Conf, 1 + WithParam, 1> c_N;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> grad_c_prod_N;
    CasADiFunctionEvaluator<Conf, 2 + WithParam, 1> gn_hess_c_N;

    template <class Loader>
        requires requires(Loader &&loader, const char *name) {
            { loader(name) } -> std::same_as<casadi::Function>;
            { loader.format_name(name) } -> std::same_as<std::string>;
        }
    static std::unique_ptr<CasADiControlFunctionsWithParam>
    load(Loader &&loader) {
        length_t nx, nu, nh, nh_N, nc, nc_N, p;
        auto load_f = [&]() -> CasADiFunctionEvaluator<Conf, 3, 1> {
            casadi::Function ffun = loader("f");
            using namespace std::literals::string_literals;
            if (ffun.n_in() != 3)
                throw std::invalid_argument(
                    "Invalid number of input arguments: got "s +
                    std::to_string(ffun.n_in()) + ", should be 3.");
            if (ffun.n_out() != 1)
                throw std::invalid_argument(
                    "Invalid number of output arguments: got "s +
                    std::to_string(ffun.n_out()) + ", should be 1.");
            nx = static_cast<length_t>(ffun.size1_in(0));
            nu = static_cast<length_t>(ffun.size1_in(1));
            p = static_cast<length_t>(ffun.size1_in(2));
            CasADiFunctionEvaluator<Conf, 3, 1> f{std::move(ffun)};
            f.validate_dimensions({dim(nx, 1), dim(nu, 1), dim(p, 1)},
                                  {dim(nx, 1)});
            return f;
        };
        auto load_h = [&]() -> CasADiFunctionEvaluator<Conf, 3, 1> {
            casadi::Function hfun = loader("h");
            using namespace std::literals::string_literals;
            if (hfun.n_in() != 3)
                throw std::invalid_argument(
                    "Invalid number of input arguments: got "s +
                    std::to_string(hfun.n_in()) + ", should be 3.");
            if (hfun.n_out() != 1)
                throw std::invalid_argument(
                    "Invalid number of output arguments: got "s +
                    std::to_string(hfun.n_out()) + ", should be 1.");
            nh = static_cast<length_t>(hfun.size1_out(0));
            CasADiFunctionEvaluator<Conf, 3, 1> h{std::move(hfun)};
            h.validate_dimensions({dim(nx, 1), dim(nu, 1), dim(p, 1)},
                                  {dim(nh, 1)});
            return h;
        };
        auto load_h_N = [&]() -> CasADiFunctionEvaluator<Conf, 2, 1> {
            casadi::Function hfun = loader("h_N");
            using namespace std::literals::string_literals;
            if (hfun.n_in() != 2)
                throw std::invalid_argument(
                    "Invalid number of input arguments: got "s +
                    std::to_string(hfun.n_in()) + ", should be 2.");
            if (hfun.n_out() != 1)
                throw std::invalid_argument(
                    "Invalid number of output arguments: got "s +
                    std::to_string(hfun.n_out()) + ", should be 1.");
            nh_N = static_cast<length_t>(hfun.size1_out(0));
            CasADiFunctionEvaluator<Conf, 2, 1> h{std::move(hfun)};
            h.validate_dimensions({dim(nx, 1), dim(p, 1)}, {dim(nh_N, 1)});
            return h;
        };
        auto load_c = [&]() -> CasADiFunctionEvaluator<Conf, 2, 1> {
            casadi::Function cfun = loader("c");
            using namespace std::literals::string_literals;
            if (cfun.n_in() != 2)
                throw std::invalid_argument(
                    "Invalid number of input arguments: got "s +
                    std::to_string(cfun.n_in()) + ", should be 2.");
            if (cfun.n_out() != 1)
                throw std::invalid_argument(
                    "Invalid number of output arguments: got "s +
                    std::to_string(cfun.n_out()) + ", should be 1.");
            nc = static_cast<length_t>(cfun.size1_out(0));
            CasADiFunctionEvaluator<Conf, 2, 1> c{std::move(cfun)};
            c.validate_dimensions({dim(nx, 1), dim(p, 1)}, {dim(nc, 1)});
            return c;
        };
        auto load_c_N = [&]() -> CasADiFunctionEvaluator<Conf, 2, 1> {
            casadi::Function cfun = loader("c_N");
            using namespace std::literals::string_literals;
            if (cfun.n_in() != 2)
                throw std::invalid_argument(
                    "Invalid number of input arguments: got "s +
                    std::to_string(cfun.n_in()) + ", should be 2.");
            if (cfun.n_out() != 1)
                throw std::invalid_argument(
                    "Invalid number of output arguments: got "s +
                    std::to_string(cfun.n_out()) + ", should be 1.");
            nc_N = static_cast<length_t>(cfun.size1_out(0));
            CasADiFunctionEvaluator<Conf, 2, 1> c{std::move(cfun)};
            c.validate_dimensions({dim(nx, 1), dim(p, 1)}, {dim(nc_N, 1)});
            return c;
        };
        // Load the functions "f", "h", and "c" to determine the unknown
        // dimensions.
        auto f = wrap_load(loader, "f", load_f);
        auto h = wrap_load(loader, "h", load_h);
        auto h_N = wrap_load(loader, "h_N", load_h_N);
        auto c = wrap_load(loader, "c", load_c);
        auto c_N = wrap_load(loader, "c_N", load_c_N);

        auto self = std::make_unique<CasADiControlFunctionsWithParam<Conf>>(
            CasADiControlFunctionsWithParam<Conf>{
                .nx = nx,
                .nu = nu,
                .nh = nh,
                .nh_N = nh_N,
                .nc = nc,
                .nc_N = nc_N,
                .p = p,
                .f = std::move(f),
                .jac_f = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "jacobian_f", dims(nx, nu, p),
                    dims(dim(nx, nx + nu))),
                .grad_f_prod =
                    wrapped_load<CasADiFunctionEvaluator<Conf, 4, 1>>(
                        loader, "grad_f_prod", dims(nx, nu, p, nx),
                        dims(nx + nu)),
                .h = std::move(h),
                .h_N = std::move(h_N),
                .l = wrapped_load<CasADiFunctionEvaluator<Conf, 2, 1>>(
                    loader, "l", dims(nh, p), dims(1)),
                .l_N = wrapped_load<CasADiFunctionEvaluator<Conf, 2, 1>>(
                    loader, "l_N", dims(nh_N, p), dims(1)),
                .qr = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "qr", dims(nx + nu, nh, p), dims(nx + nu)),
                .q_N = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "q_N", dims(nx, nh_N, p), dims(nx)),
                .Q = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "Q", dims(nx + nu, nh, p), dims(dim{nx, nx})),
                .Q_N = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "Q_N", dims(nx, nh_N, p), dims(dim{nx, nx})),
                .R = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "R", dims(nx + nu, nh, p), dims(dim{nu, nu})),
                .S = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "S", dims(nx + nu, nh, p), dims(dim{nu, nx})),
                .c = std::move(c),
                .grad_c_prod =
                    wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                        loader, "grad_c_prod", dims(nx, p, nc), dims(nx)),
                .gn_hess_c = wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                    loader, "gn_hess_c", dims(nx, p, nc), dims(dim{nx, nx})),
                .c_N = std::move(c_N),
                .grad_c_prod_N =
                    wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                        loader, "grad_c_prod_N", dims(nx, p, nc_N), dims(nx)),
                .gn_hess_c_N =
                    wrapped_load<CasADiFunctionEvaluator<Conf, 3, 1>>(
                        loader, "gn_hess_c_N", dims(nx, p, nc_N),
                        dims(dim{nx, nx})),
            });
        return self;
    }
};

} // namespace casadi_loader

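// Construct the control problem by loading the compiled CasADi functions from
// the given shared library, querying their dimensions, and allocating the
// parameter, bound, and work-space vectors.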
template <Config Conf>
CasADiControlProblem<Conf>::CasADiControlProblem(const std::string &filename,
                                                 length_t N)
    : N{N} {

    struct {
        const std::string &filename;
        auto operator()(const std::string &name) const {
            return casadi::external(name, filename);
        }
        auto format_name(const std::string &name) const {
            return filename + ':' + name;
        }
    } loader{filename};
    impl = Functions::load(loader);

    this->nx = impl->nx;
    this->nu = impl->nu;
    this->nh = impl->nh;
    this->nh_N = impl->nh_N;
    this->nc = impl->nc;
    this->nc_N = impl->nc_N;
    this->x_init = vec::Constant(nx, alpaqa::NaN<Conf>);
    this->param = vec::Constant(impl->p, alpaqa::NaN<Conf>);
    this->U = Box{nu};
    this->D = Box{nc};
    this->D_N = Box{nc_N};

    auto n_work = std::max({
        impl->Q.fun.sparsity_out(0).nnz(),
        impl->Q_N.fun.sparsity_out(0).nnz(),
        impl->gn_hess_c.fun.sparsity_out(0).nnz(),
        impl->gn_hess_c_N.fun.sparsity_out(0).nnz(),
    });
    this->work = vec::Constant(static_cast<length_t>(n_work), NaN<Conf>);

    auto bounds_filepath = fs::path{filename}.replace_extension("csv");
    if (fs::exists(bounds_filepath))
        load_numerical_data(bounds_filepath);
}

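// The CSV data file is expected to contain one row per field, in this order:
// U.lowerbound, U.upperbound, D.lowerbound, D.upperbound, D_N.lowerbound,
// D_N.upperbound, x_init, param, followed by the two integers
// penalty_alm_split and penalty_alm_split_N. Empty lines are skipped.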
template <Config Conf>
void CasADiControlProblem<Conf>::load_numerical_data(
    const std::filesystem::path &filepath, char sep) {
    // Open data file
    std::ifstream data_file{filepath};
    if (!data_file)
        throw std::runtime_error("Unable to open data file \"" +
                                 filepath.string() + '"');

    // Helper function for reading a single line of (float) data
    index_t line = 0;
    auto wrap_data_load = [&](std::string_view name, auto &v,
                              bool fixed_size = true) {
        try {
            ++line;
            if (data_file.peek() == '\n') // Ignore empty lines
                return static_cast<void>(data_file.get());
            if (fixed_size) {
                csv::read_row(data_file, v, sep);
            } else { // Dynamic size
                auto s = csv::read_row_std_vector<real_t>(data_file, sep);
                v = cmvec{s.data(), static_cast<index_t>(s.size())};
            }
        } catch (csv::read_error &e) {
            // Transform any errors into something more readable
            throw std::runtime_error("Unable to read " + std::string(name) +
                                     " from data file \"" + filepath.string() +
                                     ':' + std::to_string(line) +
                                     "\": " + e.what());
        }
    };
    // Helper function for reading a single value
    auto read_single = [&](std::string_view name, auto &v) {
        data_file >> v;
        if (!data_file)
            throw std::runtime_error("Unable to read " + std::string(name) +
                                     " from data file \"" + filepath.string() +
                                     ':' + std::to_string(line) + '"');
    };
    wrap_data_load("U.lowerbound", this->U.lowerbound);
    wrap_data_load("U.upperbound", this->U.upperbound);
    wrap_data_load("D.lowerbound", this->D.lowerbound);
    wrap_data_load("D.upperbound", this->D.upperbound);
    wrap_data_load("D_N.lowerbound", this->D_N.lowerbound);
    wrap_data_load("D_N.upperbound", this->D_N.upperbound);
    wrap_data_load("x_init", this->x_init);
    wrap_data_load("param", this->param);
    // Penalty/ALM split is a single integer
    read_single("penalty_alm_split", this->penalty_alm_split);
    read_single("penalty_alm_split_N", this->penalty_alm_split_N);
}

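// Copy, move, and destruction are defaulted: impl is a copyable_unique_ptr, so
// copies of the problem get their own instance of the loaded functions.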
template <Config Conf>
CasADiControlProblem<Conf>::CasADiControlProblem(const CasADiControlProblem &) =
    default;
template <Config Conf>
CasADiControlProblem<Conf> &CasADiControlProblem<Conf>::operator=(
    const CasADiControlProblem &) = default;

template <Config Conf>
CasADiControlProblem<Conf>::CasADiControlProblem(CasADiControlProblem &&) noexcept =
    default;
template <Config Conf>
CasADiControlProblem<Conf> &CasADiControlProblem<Conf>::operator=(
    CasADiControlProblem &&) noexcept = default;

template <Config Conf>
CasADiControlProblem<Conf>::~CasADiControlProblem() = default;

template <Config Conf>
void CasADiControlProblem<Conf>::eval_f(index_t timestep, crvec x, crvec u,
                                        rvec fxu) const {
    assert(x.size() == nx);
    assert(u.size() == nu);
    assert(fxu.size() == nx);
    impl->f({x.data(), u.data(), param.data()}, {fxu.data()});
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_jac_f(index_t timestep, crvec x, crvec u,
                                            rmat J_fxu) const {
    assert(x.size() == nx);
    assert(u.size() == nu);
    assert(J_fxu.rows() == nx);
    assert(J_fxu.cols() == nx + nu);
    impl->jac_f({x.data(), u.data(), param.data()}, {J_fxu.data()});
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_grad_f_prod(index_t timestep, crvec x,
                                                  crvec u, crvec p,
                                                  rvec grad_fxu_p) const {
    assert(x.size() == nx);
    assert(u.size() == nu);
    assert(p.size() == nx);
    assert(grad_fxu_p.size() == nx + nu);
    impl->grad_f_prod({x.data(), u.data(), param.data(), p.data()},
                      {grad_fxu_p.data()});
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_h(index_t timestep, crvec x, crvec u,
                                        rvec h) const {
    assert(x.size() == nx);
    assert(u.size() == nu);
    assert(h.size() == nh);
    impl->h({x.data(), u.data(), param.data()}, {h.data()});
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_h_N(crvec x, rvec h) const {
    assert(x.size() == nx);
    assert(h.size() == nh_N);
    impl->h_N({x.data(), param.data()}, {h.data()});
}
template <Config Conf>
auto CasADiControlProblem<Conf>::eval_l(index_t timestep, crvec h) const
    -> real_t {
    assert(h.size() == nh);
    real_t l;
    impl->l({h.data(), param.data()}, {&l});
    return l;
}
template <Config Conf>
auto CasADiControlProblem<Conf>::eval_l_N(crvec h) const -> real_t {
    assert(h.size() == nh_N);
    real_t l;
    impl->l_N({h.data(), param.data()}, {&l});
    return l;
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_qr(index_t timestep, crvec xu, crvec h,
                                         rvec qr) const {
    assert(xu.size() == nx + nu);
    assert(h.size() == nh);
    assert(qr.size() == nx + nu);
    impl->qr({xu.data(), h.data(), param.data()}, {qr.data()});
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_q_N(crvec x, crvec h, rvec q) const {
    assert(x.size() == nx);
    assert(h.size() == nh_N);
    assert(q.size() == nx);
    impl->q_N({x.data(), h.data(), param.data()}, {q.data()});
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_Q(index_t timestep, crvec xu, crvec h,
                                            rmat Q) const {
    assert(xu.size() == nx + nu);
    assert(h.size() == nh);
    assert(Q.rows() == nx);
    assert(Q.cols() == nx);
    impl->Q({xu.data(), h.data(), param.data()}, {work.data()});
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    auto &&sparse = impl->Q.fun.sparsity_out(0);
    if (sparse.is_dense())
        Q += cmmat{work.data(), nx, nx};
    else
        Q += cmspmat{
            nx,
            nx,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
}
template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_Q_N(crvec x, crvec h, rmat Q) const {
    assert(x.size() == nx);
    assert(h.size() == nh_N);
    assert(Q.rows() == nx);
    assert(Q.cols() == nx);
    impl->Q_N({x.data(), h.data(), param.data()}, {work.data()});
    auto &&sparse = impl->Q_N.fun.sparsity_out(0);
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    if (sparse.is_dense())
        Q += cmmat{work.data(), nx, nx};
    else
        Q += cmspmat{
            nx,
            nx,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
}

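// Add the input-input Hessian block R of the stage cost to the given matrix,
// but only for the rows and columns selected by `mask`. The CasADi output may
// be stored dense or sparse; both layouts are handled below.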
template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_R_masked(index_t timestep, crvec xu,
                                                   crvec h, crindexvec mask,
                                                   rmat R, rvec work) const {
    auto &&sparse = impl->R.fun.sparsity_out(0);
    assert(xu.size() == nx + nu);
    assert(h.size() == nh);
    assert(R.rows() <= nu);
    assert(R.cols() <= nu);
    assert(R.rows() == mask.size());
    assert(R.cols() == mask.size());
    assert(work.size() >= static_cast<length_t>(sparse.nnz()));
    impl->R({xu.data(), h.data(), param.data()}, {work.data()});
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    if (sparse.is_dense()) {
        cmmat R_full{work.data(), nu, nu};
        R += R_full(mask, mask);
    } else {
        cmspmat R_full{
            nu,
            nu,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
        // R += R_full(mask, mask)
        sparse_add_masked(R_full, R, mask);
    }
}

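// Same pattern for the mixed input-state block S: only the rows in `mask` are
// added, all nx columns are kept.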
template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_S_masked(index_t timestep, crvec xu,
                                                   crvec h, crindexvec mask,
                                                   rmat S, rvec work) const {
    auto &&sparse = impl->S.fun.sparsity_out(0);
    assert(xu.size() == nx + nu);
    assert(h.size() == nh);
    assert(S.rows() <= nu);
    assert(S.rows() == mask.size());
    assert(S.cols() == nx);
    assert(work.size() >= static_cast<length_t>(sparse.nnz()));
    impl->S({xu.data(), h.data(), param.data()}, {work.data()});
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    using Eigen::indexing::all;
    if (sparse.is_dense()) {
        cmmat S_full{work.data(), nu, nx};
        S += S_full(mask, all);
    } else {
        cmspmat S_full{
            nu,
            nx,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
        // S += S_full(mask,:)
        sparse_add_masked_rows(S_full, S, mask);
    }
}

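// out += R(mask_J, mask_K) * v(mask_K): masked matrix-vector product with the
// R block previously evaluated into `work`, for both dense and sparse storage.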
template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_R_prod_masked(
    index_t timestep, crvec xu, crvec h, crindexvec mask_J, crindexvec mask_K,
    crvec v, rvec out, rvec work) const {
    auto &&sparse = impl->R.fun.sparsity_out(0);
    assert(v.size() == nu);
    assert(out.size() == mask_J.size());
    assert(work.size() >= static_cast<length_t>(sparse.nnz()));
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    if (sparse.is_dense()) {
        auto R = cmmat{work.data(), nu, nu};
        out.noalias() += R(mask_J, mask_K) * v(mask_K);
    } else {
        cmspmat R{
            nu,
            nu,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
        // out += R_full(mask_J,mask_K) * v(mask_K);
        sparse_matvec_add_masked_rows_cols(R, v, out, mask_J, mask_K);
    }
}

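// out += S(mask_K,:)ᵀ * v(mask_K): transposed masked product with the S block
// stored in `work`.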
template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_S_prod_masked(
    index_t timestep, crvec xu, crvec h, crindexvec mask_K, crvec v, rvec out,
    rvec work) const {
    auto &&sparse = impl->S.fun.sparsity_out(0);
    assert(v.size() == nu);
    assert(out.size() == nx);
    assert(work.size() >= static_cast<length_t>(sparse.nnz()));
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    using Eigen::indexing::all;
    if (sparse.is_dense()) {
        auto Sᵀ = cmmat{work.data(), nu, nx}.transpose();
        out.noalias() += Sᵀ(all, mask_K) * v(mask_K);
    } else {
        cmspmat S{
            nu,
            nx,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
        // out += S(mask_K,:)ᵀ * v(mask_K);
        sparse_matvec_add_transpose_masked_rows(S, v, out, mask_K);
    }
}

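// Scratch-buffer sizes needed by the R and S evaluations above: the number of
// nonzeros of the corresponding CasADi outputs.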
template <Config Conf>
auto CasADiControlProblem<Conf>::get_R_work_size() const -> length_t {
    auto &&sparse = impl->R.fun.sparsity_out(0);
    return static_cast<length_t>(sparse.nnz());
}

template <Config Conf>
auto CasADiControlProblem<Conf>::get_S_work_size() const -> length_t {
    auto &&sparse = impl->S.fun.sparsity_out(0);
    return static_cast<length_t>(sparse.nnz());
}

template <Config Conf>
void CasADiControlProblem<Conf>::eval_constr(index_t timestep, crvec x,
                                             rvec c) const {
    if (nc == 0)
        return;
    assert(x.size() == nx);
    assert(c.size() == nc);
    impl->c({x.data(), param.data()}, {c.data()});
}

template <Config Conf>
void CasADiControlProblem<Conf>::eval_grad_constr_prod(index_t timestep, crvec x,
                                                        crvec p,
                                                        rvec grad_cx_p) const {
    assert(x.size() == nx);
    assert(p.size() == nc);
    assert(grad_cx_p.size() == nx);
    impl->grad_c_prod({x.data(), param.data(), p.data()}, {grad_cx_p.data()});
}

template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_gn_hess_constr(index_t timestep,
                                                          crvec x, crvec M,
                                                          rmat out) const {
    auto &&sparse = impl->gn_hess_c.fun.sparsity_out(0);
    assert(x.size() == nx);
    assert(M.size() == nc);
    assert(out.rows() == nx);
    assert(out.cols() == nx);
    assert(work.size() >= static_cast<length_t>(sparse.nnz()));
    impl->gn_hess_c({x.data(), param.data(), M.data()}, {work.data()});
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    if (sparse.is_dense())
        out += cmmat{work.data(), nx, nx};
    else
        out += cmspmat{
            nx,
            nx,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
}

template <Config Conf>
void CasADiControlProblem<Conf>::eval_constr_N(crvec x, rvec c) const {
    if (nc_N == 0)
        return;
    assert(x.size() == nx);
    assert(c.size() == nc_N);
    impl->c_N({x.data(), param.data()}, {c.data()});
}

template <Config Conf>
void CasADiControlProblem<Conf>::eval_grad_constr_prod_N(crvec x, crvec p,
                                                          rvec grad_cx_p) const {
    assert(x.size() == nx);
    assert(p.size() == nc_N);
    assert(grad_cx_p.size() == nx);
    impl->grad_c_prod_N({x.data(), param.data(), p.data()}, {grad_cx_p.data()});
}

template <Config Conf>
void CasADiControlProblem<Conf>::eval_add_gn_hess_constr_N(crvec x, crvec M,
                                                            rmat out) const {
    auto &&sparse = impl->gn_hess_c_N.fun.sparsity_out(0);
    assert(x.size() == nx);
    assert(M.size() == nc_N);
    assert(out.rows() == nx);
    assert(out.cols() == nx);
    assert(work.size() >= static_cast<length_t>(sparse.nnz()));
    impl->gn_hess_c_N({x.data(), param.data(), M.data()}, {work.data()});
    using spmat = Eigen::SparseMatrix<real_t, Eigen::ColMajor, casadi_int>;
    using cmspmat = Eigen::Map<const spmat>;
    if (sparse.is_dense())
        out += cmmat{work.data(), nx, nx};
    else
        out += cmspmat{
            nx,
            nx,
            static_cast<length_t>(sparse.nnz()),
            sparse.colind(),
            sparse.row(),
            work.data(),
        };
}

} // namespace alpaqa