sparsity-conversions.hpp
#pragma once

#include <alpaqa/config/config.hpp>
#include <alpaqa/problem/sparsity.hpp>
#include <alpaqa/util/sparse-ops.hpp> // COO/CSC sorting and conversion utilities (include path assumed)

#include <algorithm>
#include <cassert>
#include <concepts>
#include <numeric>
#include <optional>
#include <stdexcept>

namespace alpaqa::sparsity {
/// Converts one matrix storage format to another.
/// @tparam From
///         The input sparsity pattern type.
/// @tparam To
///         The output sparsity pattern type.
template <class From, class To>
struct SparsityConverter;

/// Additional options for the conversion performed by @ref SparsityConverter.
template <class To>
struct SparsityConversionRequest;

template <Config Conf>
struct SparsityConversionRequest<Dense<Conf>> {};

template <Config Conf>
struct SparsityConverter<Dense<Conf>, Dense<Conf>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = Dense<Conf>;
    using to_sparsity_t   = Dense<Conf>;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    SparsityConverter(from_sparsity_t from, Request = {}) : sparsity{from} {
        if (from.symmetry != Symmetry::Unsymmetric && from.rows != from.cols)
            throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
    }
    to_sparsity_t sparsity;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        from(to);
    }
};

template <Config Conf, class StorageIndex>
struct SparsityConverter<SparseCSC<Conf, StorageIndex>, Dense<Conf>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = SparseCSC<Conf, StorageIndex>;
    using to_sparsity_t   = Dense<Conf>;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    static to_sparsity_t convert_sparsity(from_sparsity_t from, Request) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        assert(util::check_uniqueness_csc(from.outer_ptr, from.inner_idx));
#endif
        if (from.symmetry != Symmetry::Unsymmetric && from.rows != from.cols)
            throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
        return {
            .rows     = from.rows,
            .cols     = from.cols,
            .symmetry = from.symmetry,
        };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : from_sparsity(from), sparsity(convert_sparsity(from, request)),
          work(from_sparsity.nnz()) {}
    from_sparsity_t from_sparsity;
    to_sparsity_t sparsity;
    mutable vec work;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        from(work);
        to.setZero();
        auto &&T = to.reshaped(sparsity.rows, sparsity.cols);
        index_t l = 0;
        for (index_t c = 0; c < from_sparsity.cols; ++c) {
            auto inner_start = from_sparsity.outer_ptr(c);
            auto inner_end   = from_sparsity.outer_ptr(c + 1);
            for (auto i = inner_start; i < inner_end; ++i) {
                auto r = from_sparsity.inner_idx(i);
                switch (from_sparsity.symmetry) {
                    case Symmetry::Unsymmetric: T(r, c) = work(l); break;
                    case Symmetry::Upper:
                        if (r > c)
                            throw std::invalid_argument(
                                "Invalid symmetric CSC matrix: upper-triangular matrix should not "
                                "have elements below the diagonal");
                        T(c, r) = T(r, c) = work(l);
                        break;
                    case Symmetry::Lower:
                        if (r < c)
                            throw std::invalid_argument(
                                "Invalid symmetric CSC matrix: lower-triangular matrix should not "
                                "have elements above the diagonal");
                        T(c, r) = T(r, c) = work(l);
                        break;
                    default: throw std::invalid_argument("Invalid symmetry");
                }
                ++l;
            }
        }
    }
};

template <Config Conf, class StorageIndex>
struct SparsityConverter<SparseCOO<Conf, StorageIndex>, Dense<Conf>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = SparseCOO<Conf, StorageIndex>;
    using to_sparsity_t   = Dense<Conf>;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    static to_sparsity_t convert_sparsity(from_sparsity_t from, Request) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        assert(util::check_uniqueness_triplets(from.row_indices, from.col_indices));
#endif
        if (from.symmetry != Symmetry::Unsymmetric && from.rows != from.cols)
            throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
        return {
            .rows     = from.rows,
            .cols     = from.cols,
            .symmetry = from.symmetry,
        };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : from_sparsity(from), sparsity(convert_sparsity(from, request)),
          work(from_sparsity.nnz()) {}
    from_sparsity_t from_sparsity;
    to_sparsity_t sparsity;
    mutable vec work;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        from(work);
        to.setZero();
        auto &&T = to.reshaped(sparsity.rows, sparsity.cols);
        for (index_t l = 0; l < from_sparsity.nnz(); ++l) {
            auto r = from_sparsity.row_indices(l) - from_sparsity.first_index;
            auto c = from_sparsity.col_indices(l) - from_sparsity.first_index;
            switch (from_sparsity.symmetry) {
                case Symmetry::Unsymmetric: T(r, c) = work(l); break;
                case Symmetry::Upper:
                    if (r > c)
                        throw std::invalid_argument(
                            "Invalid symmetric COO matrix: upper-triangular matrix should not "
                            "have elements below the diagonal");
                    T(c, r) = T(r, c) = work(l);
                    break;
                case Symmetry::Lower:
                    if (r < c)
                        throw std::invalid_argument(
                            "Invalid symmetric COO matrix: lower-triangular matrix should not "
                            "have elements above the diagonal");
                    T(c, r) = T(r, c) = work(l);
                    break;
                default: throw std::invalid_argument("Invalid symmetry");
            }
        }
    }
};

template <Config Conf, class StorageIndex>
struct SparsityConversionRequest<SparseCOO<Conf, StorageIndex>> {
    /// Convert the index offset (zero for C/C++, one for Fortran).
    std::optional<StorageIndex> first_index = std::nullopt;
};
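// Illustrative example (not part of the original header; `req`, `conv`, and
// `dense_pattern` are hypothetical names): request one-based indices when the
// resulting COO pattern is handed to a Fortran-style solver interface.
//
//     SparsityConversionRequest<SparseCOO<Conf, int>> req{.first_index = 1};
//     SparsityConverter<Dense<Conf>, SparseCOO<Conf, int>> conv{dense_pattern, req};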

template <Config Conf, class StorageIndex>
struct SparsityConverter<Dense<Conf>, SparseCOO<Conf, StorageIndex>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = Dense<Conf>;
    using to_sparsity_t   = SparseCOO<Conf, StorageIndex>;
    using storage_index_t = StorageIndex;
    using index_vector_t  = typename to_sparsity_t::index_vector_t;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    to_sparsity_t convert_sparsity(from_sparsity_t from, Request request) {
        storage_index_t Δ = 0;
        if (request.first_index)
            Δ = *request.first_index;
        switch (from.symmetry) {
            case Symmetry::Unsymmetric: {
                length_t nnz = from.rows * from.cols;
                row_indices.resize(nnz);
                col_indices.resize(nnz);
                index_t l = 0;
                for (index_t c = 0; c < from.cols; ++c) {
                    for (index_t r = 0; r < from.rows; ++r) {
                        row_indices[l] = static_cast<storage_index_t>(r) + Δ;
                        col_indices[l] = static_cast<storage_index_t>(c) + Δ;
                        ++l;
                    }
                }
            } break;
            case Symmetry::Upper: {
                if (from.rows != from.cols)
                    throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
                length_t nnz = from.rows * (from.rows + 1) / 2;
                row_indices.resize(nnz);
                col_indices.resize(nnz);
                index_t l = 0;
                for (index_t c = 0; c < from.cols; ++c) {
                    for (index_t r = 0; r <= c; ++r) {
                        row_indices[l] = static_cast<storage_index_t>(r) + Δ;
                        col_indices[l] = static_cast<storage_index_t>(c) + Δ;
                        ++l;
                    }
                }
            } break;
            case Symmetry::Lower:
                throw std::invalid_argument("Lower-triangular symmetry currently not supported");
            default: throw std::invalid_argument("Invalid symmetry");
        }
        return {
            .rows        = from.rows,
            .cols        = from.cols,
            .symmetry    = from.symmetry,
            .row_indices = row_indices,
            .col_indices = col_indices,
            .order       = to_sparsity_t::SortedByColsAndRows,
            .first_index = request.first_index ? *request.first_index : 0,
        };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : sparsity(convert_sparsity(from, request)) {
        if (sparsity.symmetry != Symmetry::Unsymmetric)
            work.resize(sparsity.rows * sparsity.cols);
    }
    index_vector_t row_indices, col_indices;
    to_sparsity_t sparsity;
    mutable vec work;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        if (sparsity.symmetry == Symmetry::Unsymmetric) {
            from(to);
        } else if (sparsity.symmetry == Symmetry::Upper) {
            from(work);
            auto &&f = work.reshaped(sparsity.rows, sparsity.cols);
            auto t   = to.begin();
            for (index_t c = 0; c < sparsity.cols; ++c)
                std::ranges::copy_backward(f.col(c).topRows(c + 1), t += c + 1);
        }
    }
};

template <Config Conf, class StorageIndexFrom, class StorageIndexTo>
struct SparsityConverter<SparseCSC<Conf, StorageIndexFrom>, SparseCOO<Conf, StorageIndexTo>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = SparseCSC<Conf, StorageIndexFrom>;
    using to_sparsity_t   = SparseCOO<Conf, StorageIndexTo>;
    using storage_index_t = StorageIndexTo;
    using index_vector_t  = typename to_sparsity_t::index_vector_t;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    to_sparsity_t convert_sparsity(from_sparsity_t from, Request request) {
        storage_index_t Δ = 0;
        if (request.first_index)
            Δ = *request.first_index;
        row_indices.resize(from.nnz());
        col_indices.resize(from.nnz());
        index_t l = 0;
        for (index_t c = 0; c < from.cols; ++c) {
            auto inner_start = from.outer_ptr(c);
            auto inner_end   = from.outer_ptr(c + 1);
            for (auto i = inner_start; i < inner_end; ++i) {
                auto r         = from.inner_idx(i);
                row_indices[l] = static_cast<storage_index_t>(r) + Δ;
                col_indices[l] = static_cast<storage_index_t>(c) + Δ;
                ++l;
            }
        }
        return {
            .rows        = from.rows,
            .cols        = from.cols,
            .symmetry    = from.symmetry,
            .row_indices = row_indices,
            .col_indices = col_indices,
            .order       = from.order == from_sparsity_t::SortedRows
                               ? to_sparsity_t::SortedByColsAndRows
                               : to_sparsity_t::SortedByColsOnly,
            .first_index = request.first_index ? *request.first_index : 0,
        };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : sparsity(convert_sparsity(from, request)) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        assert(util::check_uniqueness_triplets(sparsity.row_indices, sparsity.col_indices));
#endif
    }
    index_vector_t row_indices, col_indices;
    to_sparsity_t sparsity;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        from(to);
    }
};

template <Config Conf, class StorageIndexFrom, class StorageIndexTo>
struct SparsityConverter<SparseCOO<Conf, StorageIndexFrom>, SparseCOO<Conf, StorageIndexTo>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = SparseCOO<Conf, StorageIndexFrom>;
    using to_sparsity_t   = SparseCOO<Conf, StorageIndexTo>;
    using storage_index_t = StorageIndexTo;
    using index_vector_t  = typename to_sparsity_t::index_vector_t;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    to_sparsity_t convert_sparsity(from_sparsity_t from, Request request) {
        storage_index_t Δ = 0;
        if (request.first_index)
            Δ = *request.first_index - static_cast<storage_index_t>(from.first_index);
        // Check if we can fully reuse the indices without changes
        if constexpr (std::is_same_v<StorageIndexFrom, StorageIndexTo>)
            if (Δ == 0)
                return from;
        // Otherwise, allocate space for shifted or converted indices
        row_indices.resize(from.nnz());
        col_indices.resize(from.nnz());
        auto cvt_idx = [Δ](auto i) { return static_cast<storage_index_t>(i) + Δ; };
        std::ranges::transform(from.row_indices, row_indices.begin(), cvt_idx);
        std::ranges::transform(from.col_indices, col_indices.begin(), cvt_idx);
        return {
            .rows        = from.rows,
            .cols        = from.cols,
            .symmetry    = from.symmetry,
            .row_indices = row_indices,
            .col_indices = col_indices,
            .order       = static_cast<typename to_sparsity_t::Order>(from.order),
            .first_index = request.first_index ? *request.first_index
                                               : static_cast<storage_index_t>(from.first_index),
        };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : sparsity(convert_sparsity(from, request)) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        assert(util::check_uniqueness_triplets(sparsity.row_indices, sparsity.col_indices));
#endif
    }
    index_vector_t row_indices, col_indices;
    to_sparsity_t sparsity;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        from(to);
    }
};

template <Config Conf, class StorageIndex>
struct SparsityConversionRequest<SparseCSC<Conf, StorageIndex>> {
    /// Sort the indices.
    std::optional<typename SparseCSC<Conf, StorageIndex>::Order> order = std::nullopt;
};
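// Illustrative example (not part of the original header; `req` is a
// hypothetical name): request CSC output whose row indices are sorted within
// each column, which many sparse linear solvers expect.
//
//     SparsityConversionRequest<SparseCSC<Conf, int>> req{
//         .order = SparseCSC<Conf, int>::SortedRows};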

template <Config Conf, class StorageIndexFrom, class StorageIndexFromTo>
struct SparsityConverter<SparseCOO<Conf, StorageIndexFrom>, SparseCSC<Conf, StorageIndexFromTo>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = SparseCOO<Conf, StorageIndexFrom>;
    using to_sparsity_t   = SparseCSC<Conf, StorageIndexFromTo>;
    using storage_index_t = StorageIndexFromTo;
    using index_vector_t  = typename to_sparsity_t::index_vector_t;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    to_sparsity_t convert_sparsity(from_sparsity_t from, Request request) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        // Convert the indices
        // TODO: using indexvec here could be suboptimal if the original storage
        //       index was int
        index_vector_t row_indices(from.nnz()), col_indices(from.nnz());
        auto cvt_idx = [](auto i) { return static_cast<storage_index_t>(i); };
        std::ranges::transform(from.row_indices, row_indices.begin(), cvt_idx);
        std::ranges::transform(from.col_indices, col_indices.begin(), cvt_idx);
        // Sort the indices
        typename to_sparsity_t::Order order;
        if (request.order && *request.order == to_sparsity_t::SortedRows) {
            order = to_sparsity_t::SortedRows;
            switch (from.order) {
                case from_sparsity_t::SortedByColsAndRows:
                    // No sorting necessary
                    break;
                case from_sparsity_t::Unsorted: [[fallthrough]];
                case from_sparsity_t::SortedByColsOnly: [[fallthrough]];
                case from_sparsity_t::SortedByRowsAndCols: [[fallthrough]];
                case from_sparsity_t::SortedByRowsOnly:
                    permutation.resize(from.nnz());
                    std::iota(permutation.begin(), permutation.end(), index_t{0});
                    util::sort_triplets(row_indices, col_indices, permutation);
                    break;
                default: throw std::invalid_argument("Invalid order");
            }
        } else {
            switch (from.order) {
                case from_sparsity_t::SortedByColsAndRows:
                    order = to_sparsity_t::SortedRows;
                    // No sorting necessary
                    break;
                case from_sparsity_t::SortedByColsOnly:
                    order = to_sparsity_t::Unsorted;
                    // No sorting necessary
                    break;
                case from_sparsity_t::Unsorted: [[fallthrough]];
                case from_sparsity_t::SortedByRowsAndCols: [[fallthrough]];
                case from_sparsity_t::SortedByRowsOnly:
                    order = to_sparsity_t::Unsorted;
                    permutation.resize(from.nnz());
                    std::iota(permutation.begin(), permutation.end(), index_t{0});
                    util::sort_triplets_col(row_indices, col_indices, permutation);
                    break;
                default: throw std::invalid_argument("Invalid order");
            }
        }
        assert(!request.order || *request.order == order);
        if (std::ranges::is_sorted(permutation))
            permutation = indexvec{};
        // Convert the sorted COO format to CSC
        inner_idx.resize(from.nnz());
        outer_ptr.resize(from.cols + 1);
        util::convert_triplets_to_ccs(row_indices, col_indices, inner_idx, outer_ptr,
                                      cvt_idx(from.first_index));
        return {
            .rows      = from.rows,
            .cols      = from.cols,
            .symmetry  = from.symmetry,
            .inner_idx = inner_idx,
            .outer_ptr = outer_ptr,
            .order     = order,
        };
#else
        throw std::runtime_error(
            "This build of alpaqa does not support conversions from sparse COO format to CSC "
            "format. Please recompile with a C++23-compliant compiler.");
#endif
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : sparsity(convert_sparsity(from, request)) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        assert(util::check_uniqueness_csc(sparsity.outer_ptr, sparsity.inner_idx));
#endif
        if (permutation.size() > 0)
            work.resize(sparsity.nnz());
    }
    indexvec permutation;
    index_vector_t inner_idx, outer_ptr;
    to_sparsity_t sparsity;
    mutable vec work;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        if (permutation.size() > 0) {
            from(work);
            to = work(permutation);
        } else {
            from(to);
        }
    }
};

template <Config Conf, class StorageIndexFrom, class StorageIndexTo>
struct SparsityConverter<SparseCSC<Conf, StorageIndexFrom>, SparseCSC<Conf, StorageIndexTo>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t     = SparseCSC<Conf, StorageIndexFrom>;
    using to_sparsity_t       = SparseCSC<Conf, StorageIndexTo>;
    using storage_index_t     = StorageIndexTo;
    using index_vector_t      = typename to_sparsity_t::index_vector_t;
    using index_vector_view_t = typename to_sparsity_t::index_vector_view_t;
    using Request             = SparsityConversionRequest<to_sparsity_t>;
    to_sparsity_t convert_sparsity(from_sparsity_t from, Request request) {
        auto cvt_idx         = [](auto i) { return static_cast<storage_index_t>(i); };
        auto convert_indices = [&] {
            inner_idx.resize(from.inner_idx.size());
            std::ranges::transform(from.inner_idx, inner_idx.begin(), cvt_idx);
            outer_ptr.resize(from.outer_ptr.size());
            std::ranges::transform(from.outer_ptr, outer_ptr.begin(), cvt_idx);
        };
        auto sort_indices = [&] {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
            convert_indices();
            permutation.resize(from.nnz());
            std::iota(permutation.begin(), permutation.end(), index_t{0});
            util::sort_rows_csc(outer_ptr, inner_idx, permutation);
#else
            throw std::runtime_error(
                "This build of alpaqa does not support sorting matrices in CSC format. "
                "Please recompile with a C++23-compliant compiler.");
#endif
        };
        using Order       = typename to_sparsity_t::Order;
        bool need_sorting = false;
        auto order        = static_cast<Order>(from.order);
        // Check whether the user requested the indices to be sorted
        if (request.order && *request.order == to_sparsity_t::SortedRows) {
            order = to_sparsity_t::SortedRows;
            switch (from.order) {
                case from_sparsity_t::Unsorted: need_sorting = true; break;
                case from_sparsity_t::SortedRows: need_sorting = false; break;
                default: throw std::invalid_argument("Invalid order");
            }
        }
        // Convert and sort, or only convert the indices
        if (need_sorting)
            sort_indices();
        else if (!std::is_same_v<StorageIndexFrom, StorageIndexTo>)
            convert_indices();
        // Remove unnecessary permutations
        if (std::ranges::is_sorted(permutation))
            permutation = indexvec{};
        // If the index types are the same, we may be able to reuse the storage
        if constexpr (std::is_same_v<StorageIndexFrom, StorageIndexTo>)
            return {
                .rows      = from.rows,
                .cols      = from.cols,
                .symmetry  = from.symmetry,
                .inner_idx = need_sorting ? index_vector_view_t{inner_idx} : from.inner_idx,
                .outer_ptr = need_sorting ? index_vector_view_t{outer_ptr} : from.outer_ptr,
                .order     = order,
            };
        // If the index types are not the same, we have to copy regardless
        else
            return {
                .rows      = from.rows,
                .cols      = from.cols,
                .symmetry  = from.symmetry,
                .inner_idx = inner_idx,
                .outer_ptr = outer_ptr,
                .order     = order,
            };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : sparsity(convert_sparsity(from, request)) {
#if ALPAQA_HAVE_COO_CSC_CONVERSIONS
        assert(util::check_uniqueness_csc(sparsity.outer_ptr, sparsity.inner_idx));
#endif
        if (permutation.size() > 0)
            work.resize(sparsity.nnz());
    }
    indexvec permutation;
    index_vector_t inner_idx, outer_ptr;
    to_sparsity_t sparsity;
    mutable vec work;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        if (permutation.size() > 0) {
            from(work);
            to = work(permutation);
        } else {
            from(to);
        }
    }
};

template <Config Conf, class StorageIndex>
struct SparsityConverter<Dense<Conf>, SparseCSC<Conf, StorageIndex>> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = Dense<Conf>;
    using to_sparsity_t   = SparseCSC<Conf, StorageIndex>;
    using storage_index_t = StorageIndex;
    using index_vector_t  = typename to_sparsity_t::index_vector_t;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    to_sparsity_t convert_sparsity(from_sparsity_t from, Request) {
        auto cvt_idx = [](auto i) { return static_cast<storage_index_t>(i); };
        switch (from.symmetry) {
            case Symmetry::Unsymmetric: {
                inner_idx.resize(from.rows * from.cols);
                outer_ptr.resize(from.cols + 1);
                index_t l = 0;
                for (index_t c = 0; c < from.cols; ++c) {
                    outer_ptr[c] = cvt_idx(l);
                    for (index_t r = 0; r < from.rows; ++r) {
                        inner_idx[l] = cvt_idx(r);
                        ++l;
                    }
                }
                outer_ptr[from.cols] = cvt_idx(l);
            } break;
            case Symmetry::Upper: {
                if (from.rows != from.cols)
                    throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
                inner_idx.resize(from.rows * (from.rows + 1) / 2);
                outer_ptr.resize(from.cols + 1);
                index_t l = 0;
                for (index_t c = 0; c < from.cols; ++c) {
                    outer_ptr[c] = cvt_idx(l);
                    for (index_t r = 0; r <= c; ++r) {
                        inner_idx[l] = cvt_idx(r);
                        ++l;
                    }
                }
                outer_ptr[from.cols] = cvt_idx(l);
            } break;
            case Symmetry::Lower:
                throw std::invalid_argument("Lower-triangular symmetry currently not supported");
            default: throw std::invalid_argument("Invalid symmetry");
        }
        return {
            .rows      = from.rows,
            .cols      = from.cols,
            .symmetry  = from.symmetry,
            .inner_idx = inner_idx,
            .outer_ptr = outer_ptr,
            .order     = to_sparsity_t::SortedRows,
        };
    }
    SparsityConverter(from_sparsity_t from, Request request = {})
        : sparsity(convert_sparsity(from, request)) {
        if (sparsity.symmetry != Symmetry::Unsymmetric)
            work.resize(sparsity.rows * sparsity.cols);
    }
    index_vector_t inner_idx, outer_ptr;
    to_sparsity_t sparsity;
    mutable vec work;
    operator const to_sparsity_t &() const & { return sparsity; }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        if (sparsity.symmetry == Symmetry::Unsymmetric) {
            from(to);
        } else if (sparsity.symmetry == Symmetry::Upper) {
            from(work);
            auto &&f = work.reshaped(sparsity.rows, sparsity.cols);
            auto t   = to.begin();
            for (index_t c = 0; c < sparsity.cols; ++c)
                std::ranges::copy_backward(f.col(c).topRows(c + 1), t += c + 1);
        }
    }
};

namespace detail {
template <class To, class>
struct ConverterVariantHelper;

template <class To, class... Froms>
struct ConverterVariantHelper<To, std::variant<Froms...>> {
    using type = std::variant<SparsityConverter<Froms, To>...>;
};
} // namespace detail

template <class To>
using ConverterVariant =
    typename detail::ConverterVariantHelper<To, SparsityVariant<typename To::config_t>>::type;

/// Converts any supported matrix storage format to the given format.
/// @see @ref Sparsity
template <class Conf, class To>
    requires std::same_as<Conf, typename To::config_t>
struct SparsityConverter<Sparsity<Conf>, To> {
    USING_ALPAQA_CONFIG(Conf);
    using from_sparsity_t = Sparsity<Conf>;
    using to_sparsity_t   = To;
    using Request         = SparsityConversionRequest<to_sparsity_t>;
    SparsityConverter(Sparsity<config_t> from, Request request = {})
        : converter{std::visit(wrap_converter(request), from.value)} {}
    ConverterVariant<To> converter;
    operator const to_sparsity_t &() const {
        return std::visit([](const auto &c) -> const to_sparsity_t & { return c; }, converter);
    }
    const to_sparsity_t &get_sparsity() const { return *this; }
    template <class F>
    void convert_values(const F &from, rvec to) const {
        std::visit([&](const auto &c) { c.convert_values(from, to); }, converter);
    }
    void convert_values(crvec from, rvec to) const = delete;

  private:
    static auto wrap_converter(Request request) {
        return [request]<class From>(const From &from) -> ConverterVariant<To> {
            return SparsityConverter<From, To>{from, request};
        };
    }
};

} // namespace alpaqa::sparsity
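Usage sketch (not part of the header above): the snippet below builds a small unsorted 3×3 COO pattern, converts it to CSC with row indices sorted within each column, and then pipes the nonzero values through the converter's callback interface. The config alias alpaqa::DefaultConfig, the include path of this header, the function name coo_to_sorted_csc_example, and the exact converter constructor signature are assumptions based on the declarations shown in this listing; the COO-to-CSC conversion itself additionally requires a build with ALPAQA_HAVE_COO_CSC_CONVERSIONS enabled (C++23).

#include <Eigen/Core>
#include <alpaqa/problem/sparsity.hpp>
// #include <alpaqa/.../sparsity-conversions.hpp> // this header; exact path depends on the alpaqa version

void coo_to_sorted_csc_example() {
    USING_ALPAQA_CONFIG(alpaqa::DefaultConfig);
    using namespace alpaqa::sparsity;
    using ToCSC = SparseCSC<config_t, int>;

    // A 3×3 pattern with entries (0,0), (2,1), (1,1), deliberately unsorted.
    Eigen::VectorX<int> rows(3), cols(3);
    rows << 0, 2, 1;
    cols << 0, 1, 1;
    SparseCOO<config_t, int> coo{
        .rows        = 3,
        .cols        = 3,
        .symmetry    = Symmetry::Unsymmetric,
        .row_indices = rows,
        .col_indices = cols,
        .order       = SparseCOO<config_t, int>::Unsorted,
        .first_index = 0,
    };

    // Ask for CSC output with the row indices sorted within each column.
    SparsityConversionRequest<ToCSC> request{.order = ToCSC::SortedRows};
    SparsityConverter<SparseCOO<config_t, int>, ToCSC> converter{coo, request};
    const ToCSC &csc = converter.get_sparsity();

    // Values are produced by a callback in the *input* (COO) order; the
    // converter permutes them into the CSC order when necessary.
    vec coo_values(3), csc_values(csc.nnz());
    coo_values << 1.0, 2.0, 3.0;
    converter.convert_values([&](rvec v) { v = coo_values; }, csc_values);
}

The variant-based SparsityConverter<Sparsity<Conf>, To> at the end of the header works the same way: it dispatches to the matching concrete converter at runtime via std::visit, so the same get_sparsity/convert_values calls apply when the input pattern is only known as a Sparsity variant.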