WPILibC++ 2023.4.3-108-ge5452e3
SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13namespace Eigen {
14
15/** \ingroup SparseCore_Module
16 *
17 * \class SparseMatrix
18 *
19 * \brief A versatile sparse matrix representation
20 *
21 * This class implements a more versatile variant of the common \em compressed row/column storage format.
22 * Each column's (resp. row's) non-zeros are stored as pairs of value and associated row (resp. column) index.
23 * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
24 * space in between the non-zeros of two successive columns (resp. rows) so that insertion of a new non-zero
25 * can be done with limited memory reallocation and copying.
26 *
27 * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
28 * compatible with many libraries.
29 *
30 * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
31 *
32 * \tparam _Scalar the scalar type, i.e. the type of the coefficients
33 * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibilities
34 * are ColMajor or RowMajor. The default is 0, which means column-major.
35 * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
36 *
37 * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
38 * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
39 * Code making use of \c SparseMatrix::Index will thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
40 *
41 * This class can be extended with the help of the plugin mechanism described on the page
42 * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
43 */
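// [Editor's note] Minimal usage sketch of the storage scheme described above; this
// is illustrative only and not part of the Eigen sources. It assumes <Eigen/Sparse>
// has been included and uses only the public SparseMatrix API:
//
//   Eigen::SparseMatrix<double> A(1000, 1000);  // column-major by default
//   A.insert(3, 7) = 1.5;                       // may switch A to uncompressed mode
//   A.coeffRef(3, 7) += 0.5;                    // read/write access, inserting if needed
//   A.makeCompressed();                         // pack back into standard compressed storage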
44
45namespace internal {
46template<typename _Scalar, int _Options, typename _StorageIndex>
47struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48{
49 typedef _Scalar Scalar;
50 typedef _StorageIndex StorageIndex;
53 enum {
54 RowsAtCompileTime = Dynamic,
55 ColsAtCompileTime = Dynamic,
56 MaxRowsAtCompileTime = Dynamic,
57 MaxColsAtCompileTime = Dynamic,
59 SupportedAccessPatterns = InnerRandomAccessPattern
60 };
61};
62
63template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65{
69
70 typedef _Scalar Scalar;
72 typedef _StorageIndex StorageIndex;
74
75 enum {
76 RowsAtCompileTime = Dynamic,
77 ColsAtCompileTime = 1,
78 MaxRowsAtCompileTime = Dynamic,
79 MaxColsAtCompileTime = 1,
80 Flags = LvalueBit
81 };
82};
83
84template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87{
88 enum {
89 Flags = 0
90 };
91};
92
93} // end namespace internal
94
95template<typename _Scalar, int _Options, typename _StorageIndex>
97 : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98{
101 friend class SparseVector<_Scalar,0,_StorageIndex>;
102 template<typename, typename, typename, typename, typename>
103 friend struct internal::Assignment;
104 public:
105 using Base::isCompressed;
106 using Base::nonZeros;
108 using Base::operator+=;
109 using Base::operator-=;
110
116
117
118 using Base::IsRowMajor;
120 enum {
121 Options = _Options
122 };
123
126 protected:
128
132 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
134
135 public:
136
137 /** \returns the number of rows of the matrix */
138 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
139 /** \returns the number of columns of the matrix */
140 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
141
142 /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
143 inline Index innerSize() const { return m_innerSize; }
144 /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
145 inline Index outerSize() const { return m_outerSize; }
146
147 /** \returns a const pointer to the array of values.
148 * This function is aimed at interoperability with other libraries.
149 * \sa innerIndexPtr(), outerIndexPtr() */
150 inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
151 /** \returns a non-const pointer to the array of values.
152 * This function is aimed at interoperability with other libraries.
153 * \sa innerIndexPtr(), outerIndexPtr() */
154 inline Scalar* valuePtr() { return m_data.valuePtr(); }
155
156 /** \returns a const pointer to the array of inner indices.
157 * This function is aimed at interoperability with other libraries.
158 * \sa valuePtr(), outerIndexPtr() */
159 inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
160 /** \returns a non-const pointer to the array of inner indices.
161 * This function is aimed at interoperability with other libraries.
162 * \sa valuePtr(), outerIndexPtr() */
164
165 /** \returns a const pointer to the array of the starting positions of the inner vectors.
166 * This function is aimed at interoperability with other libraries.
167 * \sa valuePtr(), innerIndexPtr() */
168 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
169 /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
170 * This function is aimed at interoperability with other libraries.
171 * \sa valuePtr(), innerIndexPtr() */
173
174 /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
175 * This function is aimed at interoperability with other libraries.
176 * \warning it returns a null pointer in compressed mode */
177 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
178 /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
179 * This function is aimed at interoperability with other libraries.
180 * \warning it returns a null pointer in compressed mode */
182
183 /** \internal */
184 inline Storage& data() { return m_data; }
185 /** \internal */
186 inline const Storage& data() const { return m_data; }
187
188 /** \returns the value of the matrix at position \a row, \a col
189 * This function returns Scalar(0) if the element is an explicit \em zero */
190 inline Scalar coeff(Index row, Index col) const
191 {
192 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
193
194 const Index outer = IsRowMajor ? row : col;
195 const Index inner = IsRowMajor ? col : row;
196 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
197 return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
198 }
199
200 /** \returns a non-const reference to the value of the matrix at position \a row, \a col
201 *
202 * If the element does not exist then it is inserted via the insert(Index,Index) function
203 * which itself turns the matrix into a non compressed form if that was not the case.
204 *
205 * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
206 * function if the element does not already exist.
207 */
209 {
210 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
211
212 const Index outer = IsRowMajor ? row : col;
213 const Index inner = IsRowMajor ? col : row;
214
215 Index start = m_outerIndex[outer];
216 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
217 eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
218 if(end<=start)
219 return insert(row,col);
220 const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
221 if((p<end) && (m_data.index(p)==inner))
222 return m_data.value(p);
223 else
224 return insert(row,col);
225 }
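  // [Editor's note] Illustrative only (not part of the Eigen sources): coeff() never
  // inserts, whereas coeffRef() creates the entry when it is missing, as documented above.
  //
  //   Eigen::SparseMatrix<double> A(3, 3);
  //   A.coeffRef(0, 2) = 4.0;     // inserts (0,2) since it did not exist yet
  //   double v = A.coeff(2, 0);   // returns 0.0 without inserting anything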
226
227 /** \returns a reference to a newly inserted non-zero coefficient at coordinates \a row x \a col.
228 * The non-zero coefficient must \b not already exist.
229 *
230 * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
231 * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
232 * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
233 * inserted by increasing outer-indices.
234 *
235 * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
236 * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
237 *
238 * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
239 * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
240 *
241 */
243
244 public:
245
246 /** Removes all non-zeros but keeps the allocated memory
247 *
248 * This function does not free the currently allocated memory. To release as much memory as possible,
249 * call \code mat.data().squeeze(); \endcode after resizing it.
250 *
251 * \sa resize(Index,Index), data()
252 */
253 inline void setZero()
254 {
255 m_data.clear();
256 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
258 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
259 }
260
261 /** Preallocates \a reserveSize non zeros.
262 *
263 * Precondition: the matrix must be in compressed mode. */
264 inline void reserve(Index reserveSize)
265 {
266 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
267 m_data.reserve(reserveSize);
268 }
269
270 #ifdef EIGEN_PARSED_BY_DOXYGEN
271 /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
272 *
273 * This function turns the matrix into non-compressed mode.
274 *
275 * The type \c SizesType must expose the following interface:
276 \code
277 typedef value_type;
278 const value_type& operator[](i) const;
279 \endcode
280 * for \c i in the [0,this->outerSize()[ range.
281 * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
282 */
283 template<class SizesType>
284 inline void reserve(const SizesType& reserveSizes);
285 #else
286 template<class SizesType>
287 inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
288 #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
289 typename
290 #endif
291 SizesType::value_type())
292 {
293 EIGEN_UNUSED_VARIABLE(enableif);
294 reserveInnerVectors(reserveSizes);
295 }
296 #endif // EIGEN_PARSED_BY_DOXYGEN
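  // [Editor's note] Illustrative only (not part of the Eigen sources): reserving a
  // per-column budget through the SizesType overload documented above, assuming a
  // column-major matrix with roughly 6 non-zeros expected per column:
  //
  //   Eigen::SparseMatrix<double> A(100, 100);
  //   A.reserve(Eigen::VectorXi::Constant(A.outerSize(), 6));
  //   // ... A.insert(i, j) = v; for each entry ...
  //   A.makeCompressed();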
297 protected:
298 template<class SizesType>
299 inline void reserveInnerVectors(const SizesType& reserveSizes)
300 {
301 if(isCompressed())
302 {
303 Index totalReserveSize = 0;
304 // turn the matrix into non-compressed mode
305 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
307
308 // temporarily use m_innerNonZeros to hold the new starting points.
309 StorageIndex* newOuterIndex = m_innerNonZeros;
310
312 for(Index j=0; j<m_outerSize; ++j)
313 {
314 newOuterIndex[j] = count;
315 count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
316 totalReserveSize += reserveSizes[j];
317 }
318 m_data.reserve(totalReserveSize);
319 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
320 for(Index j=m_outerSize-1; j>=0; --j)
321 {
322 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
323 for(Index i=innerNNZ-1; i>=0; --i)
324 {
325 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
326 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
327 }
328 previousOuterIndex = m_outerIndex[j];
329 m_outerIndex[j] = newOuterIndex[j];
330 m_innerNonZeros[j] = innerNNZ;
331 }
332 if(m_outerSize>0)
334
336 }
337 else
338 {
339 StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
340 if (!newOuterIndex) internal::throw_std_bad_alloc();
341
343 for(Index j=0; j<m_outerSize; ++j)
344 {
345 newOuterIndex[j] = count;
346 StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
347 StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
348 count += toReserve + m_innerNonZeros[j];
349 }
350 newOuterIndex[m_outerSize] = count;
351
353 for(Index j=m_outerSize-1; j>=0; --j)
354 {
355 Index offset = newOuterIndex[j] - m_outerIndex[j];
356 if(offset>0)
357 {
358 StorageIndex innerNNZ = m_innerNonZeros[j];
359 for(Index i=innerNNZ-1; i>=0; --i)
360 {
361 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
362 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
363 }
364 }
365 }
366
367 std::swap(m_outerIndex, newOuterIndex);
368 std::free(newOuterIndex);
369 }
370
371 }
372 public:
373
374 //--- low level purely coherent filling ---
375
376 /** \internal
377 * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
378 * - the nonzero does not already exist
379 * - the new coefficient is the last one according to the storage order
380 *
381 * Before filling a given inner vector you must call the startVec(Index) function.
382 *
383 * After an insertion session, you should call the finalize() function.
384 *
385 * \sa insert, insertBackByOuterInner, startVec */
387 {
389 }
390
391 /** \internal
392 * \sa insertBack, startVec */
394 {
395 eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396 eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397 Index p = m_outerIndex[outer+1];
398 ++m_outerIndex[outer+1];
399 m_data.append(Scalar(0), inner);
400 return m_data.value(p);
401 }
402
403 /** \internal
404 * \warning use it only if you know what you are doing */
406 {
407 Index p = m_outerIndex[outer+1];
408 ++m_outerIndex[outer+1];
409 m_data.append(Scalar(0), inner);
410 return m_data.value(p);
411 }
412
413 /** \internal
414 * \sa insertBack, insertBackByOuterInner */
415 inline void startVec(Index outer)
416 {
417 eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418 eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419 m_outerIndex[outer+1] = m_outerIndex[outer];
420 }
421
422 /** \internal
423 * Must be called after inserting a set of non-zero entries using the low-level compressed API.
424 */
425 inline void finalize()
426 {
427 if(isCompressed())
428 {
429 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430 Index i = m_outerSize;
431 // find the last filled column
432 while (i>=0 && m_outerIndex[i]==0)
433 --i;
434 ++i;
435 while (i<=m_outerSize)
436 {
437 m_outerIndex[i] = size;
438 ++i;
439 }
440 }
441 }
442
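  // [Editor's note] Illustrative sketch (not part of the Eigen sources) of the low-level
  // sequential filling API documented above (these members are tagged \internal): each
  // inner vector is opened with startVec(), filled in increasing inner-index order with
  // insertBack(), and the matrix is finalized once at the end.
  //
  //   Eigen::SparseMatrix<double> A(5, 5);  // column-major
  //   A.reserve(10);
  //   for (int j = 0; j < 5; ++j) {
  //     A.startVec(j);
  //     A.insertBack(j, j) = 1.0;           // diagonal entry; inner index equals j here
  //   }
  //   A.finalize();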
443 //---
444
445 template<typename InputIterators>
446 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
447
448 template<typename InputIterators,typename DupFunctor>
449 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
450
452
453 template<typename DupFunctor>
454 void collapseDuplicates(DupFunctor dup_func = DupFunctor());
455
456 //---
457
458 /** \internal
459 * same as insert(Index,Index) except that the indices are given relative to the storage order */
461 {
462 return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
463 }
464
465 /** Turns the matrix into the \em compressed format.
466 */
468 {
469 if(isCompressed())
470 return;
471
473
474 Index oldStart = m_outerIndex[1];
476 for(Index j=1; j<m_outerSize; ++j)
477 {
478 Index nextOldStart = m_outerIndex[j+1];
479 Index offset = oldStart - m_outerIndex[j];
480 if(offset>0)
481 {
482 for(Index k=0; k<m_innerNonZeros[j]; ++k)
483 {
484 m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485 m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486 }
487 }
489 oldStart = nextOldStart;
490 }
492 m_innerNonZeros = 0;
494 m_data.squeeze();
495 }
496
497 /** Turns the matrix into uncompressed mode */
499 {
500 if(m_innerNonZeros != 0)
501 return;
502 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503 for (Index i = 0; i < m_outerSize; i++)
504 {
506 }
507 }
508
509 /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
510 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511 {
512 prune(default_prunning_func(reference,epsilon));
513 }
514
515 /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
516 * The functor type \a KeepFunc must implement the following function:
517 * \code
518 * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
519 * \endcode
520 * \sa prune(Scalar,RealScalar)
521 */
522 template<typename KeepFunc>
523 void prune(const KeepFunc& keep = KeepFunc())
524 {
525 // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
527
528 StorageIndex k = 0;
529 for(Index j=0; j<m_outerSize; ++j)
530 {
531 Index previousStart = m_outerIndex[j];
532 m_outerIndex[j] = k;
533 Index end = m_outerIndex[j+1];
534 for(Index i=previousStart; i<end; ++i)
535 {
536 if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537 {
538 m_data.value(k) = m_data.value(i);
539 m_data.index(k) = m_data.index(i);
540 ++k;
541 }
542 }
543 }
545 m_data.resize(k,0);
546 }
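  // [Editor's note] Illustrative only (not part of the Eigen sources): pruning with a
  // custom keep-functor, here a C++11 lambda dropping entries below a fixed threshold
  // (<cmath> assumed for std::abs):
  //
  //   A.prune([](const Eigen::Index&, const Eigen::Index&, const double& value) {
  //     return std::abs(value) > 1e-12;   // keep only sufficiently large entries
  //   });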
547
548 /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
549 *
550 * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed-mode
551 * and the storage of the out-of-bounds coefficients is kept and reserved.
552 * Call makeCompressed() to pack the entries and squeeze extra memory.
553 *
554 * \sa reserve(), setZero(), makeCompressed()
555 */
557 {
558 // No change
559 if (this->rows() == rows && this->cols() == cols) return;
560
561 // If one dimension is null, then there is nothing to be preserved
562 if(rows==0 || cols==0) return resize(rows,cols);
563
564 Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565 Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566 StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567
568 // Deals with inner non zeros
569 if (m_innerNonZeros)
570 {
571 // Resize m_innerNonZeros
572 StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573 if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574 m_innerNonZeros = newInnerNonZeros;
575
576 for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577 m_innerNonZeros[i] = 0;
578 }
579 else if (innerChange < 0)
580 {
581 // Inner size decreased: allocate a new m_innerNonZeros
582 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
584 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
586 for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
587 m_innerNonZeros[i] = 0;
588 }
589
590 // Change the m_innerNonZeros in case of a decrease of inner size
591 if (m_innerNonZeros && innerChange < 0)
592 {
593 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
594 {
596 StorageIndex start = m_outerIndex[i];
597 while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
598 }
599 }
600
601 m_innerSize = newInnerSize;
602
603 // Re-allocate outer index structure if necessary
604 if (outerChange == 0)
605 return;
606
607 StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
608 if (!newOuterIndex) internal::throw_std_bad_alloc();
609 m_outerIndex = newOuterIndex;
610 if (outerChange > 0)
611 {
612 StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
613 for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
614 m_outerIndex[i] = lastIdx;
615 }
616 m_outerSize += outerChange;
617 }
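  // [Editor's note] Illustrative only (not part of the Eigen sources): the resizing member
  // documented above preserves existing entries (Eigen names it conservativeResize), while
  // resize() below discards them.
  //
  //   Eigen::SparseMatrix<double> A(4, 4);
  //   A.insert(1, 1) = 2.0;
  //   A.conservativeResize(6, 6);   // A.coeff(1,1) is still 2.0
  //   A.resize(3, 3);               // A is now an empty 3x3 matrix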
618
619 /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
620 *
621 * This function does not free the currently allocated memory. To release as much memory as possible,
622 * call \code mat.data().squeeze(); \endcode after resizing it.
623 *
624 * \sa reserve(), setZero()
625 */
627 {
628 const Index outerSize = IsRowMajor ? rows : cols;
630 m_data.clear();
631 if (m_outerSize != outerSize || m_outerSize==0)
632 {
634 m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
636
638 }
640 {
642 m_innerNonZeros = 0;
643 }
644 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
645 }
646
647 /** \internal
648 * Resize the nonzero vector to \a size */
650 {
652 }
653
654 /** \returns a const expression of the diagonal coefficients. */
656
657 /** \returns a read-write expression of the diagonal coefficients.
658 * \warning If the diagonal entries are written, then all diagonal
659 * entries \b must already exist, otherwise an assertion will be raised.
660 */
662
663 /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
666 {
667 check_template_parameters();
668 resize(0, 0);
669 }
670
671 /** Constructs a \a rows \c x \a cols empty matrix */
674 {
675 check_template_parameters();
676 resize(rows, cols);
677 }
678
679 /** Constructs a sparse matrix from the sparse expression \a other */
680 template<typename OtherDerived>
683 {
685 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686 check_template_parameters();
687 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688 if (needToTranspose)
689 *this = other.derived();
690 else
691 {
692 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694 #endif
696 }
697 }
698
699 /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
700 template<typename OtherDerived, unsigned int UpLo>
703 {
704 check_template_parameters();
705 Base::operator=(other);
706 }
707
708 /** Copy constructor (it performs a deep copy) */
709 inline SparseMatrix(const SparseMatrix& other)
711 {
712 check_template_parameters();
713 *this = other.derived();
714 }
715
716 /** \brief Copy constructor with in-place evaluation */
717 template<typename OtherDerived>
720 {
721 check_template_parameters();
722 initAssignment(other);
723 other.evalTo(*this);
724 }
725
726 /** \brief Copy constructor with in-place evaluation */
727 template<typename OtherDerived>
730 {
731 check_template_parameters();
732 *this = other.derived();
733 }
734
735 /** Swaps the content of two sparse matrices of the same type.
736 * This is a fast operation that simply swaps the underlying pointers and parameters. */
737 inline void swap(SparseMatrix& other)
738 {
739 //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
744 m_data.swap(other.m_data);
745 }
746
747 /** Sets *this to the identity matrix.
748 * This function also turns the matrix into compressed mode, and drops any reserved memory. */
749 inline void setIdentity()
750 {
751 eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752 this->m_data.resize(rows());
753 Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754 Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755 Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
757 m_innerNonZeros = 0;
758 }
759 inline SparseMatrix& operator=(const SparseMatrix& other)
760 {
761 if (other.isRValue())
762 {
763 swap(other.const_cast_derived());
764 }
765 else if(this!=&other)
766 {
767 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
768 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
769 #endif
770 initAssignment(other);
771 if(other.isCompressed())
772 {
774 m_data = other.m_data;
775 }
776 else
777 {
778 Base::operator=(other);
779 }
780 }
781 return *this;
782 }
783
784#ifndef EIGEN_PARSED_BY_DOXYGEN
785 template<typename OtherDerived>
787 { return Base::operator=(other.derived()); }
788
789 template<typename Lhs, typename Rhs>
791#endif // EIGEN_PARSED_BY_DOXYGEN
792
793 template<typename OtherDerived>
795
796 friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
797 {
799 s << "Nonzero entries:\n";
800 if(m.isCompressed())
801 {
802 for (Index i=0; i<m.nonZeros(); ++i)
803 s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
804 }
805 else
806 {
807 for (Index i=0; i<m.outerSize(); ++i)
808 {
809 Index p = m.m_outerIndex[i];
810 Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
811 Index k=p;
812 for (; k<pe; ++k) {
813 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
814 }
815 for (; k<m.m_outerIndex[i+1]; ++k) {
816 s << "(_,_) ";
817 }
818 }
819 }
820 s << std::endl;
821 s << std::endl;
822 s << "Outer pointers:\n";
823 for (Index i=0; i<m.outerSize(); ++i) {
824 s << m.m_outerIndex[i] << " ";
825 }
826 s << " $" << std::endl;
827 if(!m.isCompressed())
828 {
829 s << "Inner non zeros:\n";
830 for (Index i=0; i<m.outerSize(); ++i) {
831 s << m.m_innerNonZeros[i] << " ";
832 }
833 s << " $" << std::endl;
834 }
835 s << std::endl;
836 );
837 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
838 return s;
839 }
840
841 /** Destructor */
843 {
844 std::free(m_outerIndex);
845 std::free(m_innerNonZeros);
846 }
847
848 /** Overloaded for performance */
849 Scalar sum() const;
850
851# ifdef EIGEN_SPARSEMATRIX_PLUGIN
852# include EIGEN_SPARSEMATRIX_PLUGIN
853# endif
854
855protected:
856
857 template<typename Other>
858 void initAssignment(const Other& other)
859 {
860 resize(other.rows(), other.cols());
861 if(m_innerNonZeros)
862 {
863 std::free(m_innerNonZeros);
864 m_innerNonZeros = 0;
865 }
866 }
867
868 /** \internal
869 * \sa insert(Index,Index) */
871
872 /** \internal
873 * A vector object that is equal to 0 everywhere except for the value v at position i */
875 {
876 StorageIndex m_index;
877 StorageIndex m_value;
878 public:
881 : m_index(convert_index(i)), m_value(convert_index(v))
882 {}
883
884 StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
885 };
886
887 /** \internal
888 * \sa insert(Index,Index) */
890
891public:
892 /** \internal
893 * \sa insert(Index,Index) */
895 {
896 const Index outer = IsRowMajor ? row : col;
897 const Index inner = IsRowMajor ? col : row;
898
899 eigen_assert(!isCompressed());
900 eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
901
902 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
903 m_data.index(p) = convert_index(inner);
904 return (m_data.value(p) = Scalar(0));
905 }
906protected:
908 IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
911 };
912
913 /** \internal assign \a diagXpr to the diagonal of \c *this
914 * There are different strategies:
915 * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
916 * 2 - otherwise, for each diagonal coeff,
917 * 2.a - if it already exists, then we update it,
918 * 2.b - otherwise, if *this is uncompressed and the current inner-vector has room for at least 1 element, then we perform an in-place insertion.
919 * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
920 * 3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk to the right position, and insert the new elements.
921 *
922 * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
923 * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
924 * then it *might* be better to disable case 2.b since they will have to be copied anyway.
925 */
926 template<typename DiagXpr, typename Func>
927 void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
928 {
929 Index n = diagXpr.size();
930
932 if(overwrite)
933 {
934 if((this->rows()!=n) || (this->cols()!=n))
935 this->resize(n, n);
936 }
937
938 if(m_data.size()==0 || overwrite)
939 {
940 typedef Array<StorageIndex,Dynamic,1> ArrayXI;
941 this->makeCompressed();
942 this->resizeNonZeros(n);
943 Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
944 Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
945 Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
946 values.setZero();
947 internal::call_assignment_no_alias(values, diagXpr, assignFunc);
948 }
949 else
950 {
951 bool isComp = isCompressed();
952 internal::evaluator<DiagXpr> diaEval(diagXpr);
953 std::vector<IndexPosPair> newEntries;
954
955 // 1 - try in-place update and record insertion failures
956 for(Index i = 0; i<n; ++i)
957 {
958 internal::LowerBoundIndex lb = this->lower_bound(i,i);
959 Index p = lb.value;
960 if(lb.found)
961 {
962 // the coeff already exists
963 assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
964 }
965 else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
966 {
967 // non compressed mode with local room for inserting one element
968 m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
969 m_innerNonZeros[i]++;
970 m_data.value(p) = Scalar(0);
971 m_data.index(p) = StorageIndex(i);
972 assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
973 }
974 else
975 {
976 // defer insertion
977 newEntries.push_back(IndexPosPair(i,p));
978 }
979 }
980 // 2 - insert deferred entries
981 Index n_entries = Index(newEntries.size());
982 if(n_entries>0)
983 {
984 Storage newData(m_data.size()+n_entries);
985 Index prev_p = 0;
986 Index prev_i = 0;
987 for(Index k=0; k<n_entries;++k)
988 {
989 Index i = newEntries[k].i;
990 Index p = newEntries[k].p;
991 internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
992 internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
993 for(Index j=prev_i;j<i;++j)
994 m_outerIndex[j+1] += k;
995 if(!isComp)
996 m_innerNonZeros[i]++;
997 prev_p = p;
998 prev_i = i;
999 newData.value(p+k) = Scalar(0);
1000 newData.index(p+k) = StorageIndex(i);
1001 assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002 }
1003 {
1004 internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005 internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006 for(Index j=prev_i+1;j<=m_outerSize;++j)
1007 m_outerIndex[j] += n_entries;
1008 }
1009 m_data.swap(newData);
1010 }
1011 }
1012 }
1013
1014private:
1015 static void check_template_parameters()
1016 {
1017 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1018 EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1019 }
1020
1021 struct default_prunning_func {
1022 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1023 inline bool operator() (const Index&, const Index&, const Scalar& value) const
1024 {
1025 return !internal::isMuchSmallerThan(value, reference, epsilon);
1026 }
1027 Scalar reference;
1028 RealScalar epsilon;
1029 };
1030};
1031
1032namespace internal {
1033
1034template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1035void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1036{
1037 enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1038 typedef typename SparseMatrixType::Scalar Scalar;
1039 typedef typename SparseMatrixType::StorageIndex StorageIndex;
1041
1042 if(begin!=end)
1043 {
1044 // pass 1: count the nnz per inner-vector
1045 typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1046 wi.setZero();
1047 for(InputIterator it(begin); it!=end; ++it)
1048 {
1049 eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1050 wi(IsRowMajor ? it->col() : it->row())++;
1051 }
1052
1053 // pass 2: insert all the elements into trMat
1054 trMat.reserve(wi);
1055 for(InputIterator it(begin); it!=end; ++it)
1056 trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1057
1058 // pass 3:
1059 trMat.collapseDuplicates(dup_func);
1060 }
1061
1062 // pass 4: transposed copy -> implicit sorting
1063 mat = trMat;
1064}
1065
1066}
1067
1068
1069/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
1070 *
1071 * A \em triplet is a tuple (i,j,value) defining a non-zero element.
1072 * The input list of triplets does not have to be sorted, and can contain duplicated elements.
1073 * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
1074 * This is a \em O(n) operation, with \em n the number of triplet elements.
1075 * The initial contents of \c *this are destroyed.
1076 * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
1077 * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
1078 *
1079 * The \a InputIterators value_type must provide the following interface:
1080 * \code
1081 * Scalar value() const; // the value
1082 * Scalar row() const; // the row index i
1083 * Scalar col() const; // the column index j
1084 * \endcode
1085 * See for instance the Eigen::Triplet template class.
1086 *
1087 * Here is a typical usage example:
1088 * \code
1089 typedef Triplet<double> T;
1090 std::vector<T> tripletList;
1091 tripletList.reserve(estimation_of_entries);
1092 for(...)
1093 {
1094 // ...
1095 tripletList.push_back(T(i,j,v_ij));
1096 }
1097 SparseMatrixType m(rows,cols);
1098 m.setFromTriplets(tripletList.begin(), tripletList.end());
1099 // m is ready to go!
1100 * \endcode
1101 *
1102 * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
1103 * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
1104 * be explicitly stored into a std::vector for instance.
1105 */
1106template<typename Scalar, int _Options, typename _StorageIndex>
1107template<typename InputIterators>
1108void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1109{
1110 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1111}
1112
1113/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
1114 * \code
1115 * value = dup_func(OldValue, NewValue)
1116 * \endcode
1117 * Here is a C++11 example keeping the latest entry only:
1118 * \code
1119 * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
1120 * \endcode
1121 */
1122template<typename Scalar, int _Options, typename _StorageIndex>
1123template<typename InputIterators,typename DupFunctor>
1124void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1125{
1126 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1127}
1128
1129/** \internal */
1130template<typename Scalar, int _Options, typename _StorageIndex>
1131template<typename DupFunctor>
1133{
1134 eigen_assert(!isCompressed());
1135 // TODO, in practice we should be able to use m_innerNonZeros for that task
1136 IndexVector wi(innerSize());
1137 wi.fill(-1);
1138 StorageIndex count = 0;
1139 // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
1140 for(Index j=0; j<outerSize(); ++j)
1141 {
1142 StorageIndex start = count;
1143 Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1144 for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1145 {
1146 Index i = m_data.index(k);
1147 if(wi(i)>=start)
1148 {
1149 // we have already met this entry => accumulate it
1150 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1151 }
1152 else
1153 {
1154 m_data.value(count) = m_data.value(k);
1155 m_data.index(count) = m_data.index(k);
1156 wi(i) = count;
1157 ++count;
1158 }
1159 }
1160 m_outerIndex[j] = start;
1161 }
1162 m_outerIndex[m_outerSize] = count;
1163
1164 // turn the matrix into compressed form
1165 std::free(m_innerNonZeros);
1166 m_innerNonZeros = 0;
1167 m_data.resize(m_outerIndex[m_outerSize]);
1168}
1169
1170template<typename Scalar, int _Options, typename _StorageIndex>
1171template<typename OtherDerived>
1173{
1175 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1176
1177 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1178 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1179 #endif
1180
1181 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1182 if (needToTranspose)
1183 {
1184 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1185 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1186 #endif
1187 // two-pass algorithm:
1188 // 1 - compute the number of coeffs per dest inner vector
1189 // 2 - do the actual copy/eval
1190 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1192 typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1193 typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1194 OtherCopy otherCopy(other.derived());
1195 OtherCopyEval otherCopyEval(otherCopy);
1196
1197 SparseMatrix dest(other.rows(),other.cols());
1198 Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1199
1200 // pass 1
1201 // FIXME the above copy could be merged with that pass
1202 for (Index j=0; j<otherCopy.outerSize(); ++j)
1203 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1204 ++dest.m_outerIndex[it.index()];
1205
1206 // prefix sum
1207 StorageIndex count = 0;
1208 IndexVector positions(dest.outerSize());
1209 for (Index j=0; j<dest.outerSize(); ++j)
1210 {
1211 StorageIndex tmp = dest.m_outerIndex[j];
1212 dest.m_outerIndex[j] = count;
1213 positions[j] = count;
1214 count += tmp;
1215 }
1216 dest.m_outerIndex[dest.outerSize()] = count;
1217 // alloc
1218 dest.m_data.resize(count);
1219 // pass 2
1220 for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1221 {
1222 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1223 {
1224 Index pos = positions[it.index()]++;
1225 dest.m_data.index(pos) = j;
1226 dest.m_data.value(pos) = it.value();
1227 }
1228 }
1229 this->swap(dest);
1230 return *this;
1231 }
1232 else
1233 {
1234 if(other.isRValue())
1235 {
1236 initAssignment(other.derived());
1237 }
1238 // there is no special optimization
1239 return Base::operator=(other.derived());
1240 }
1241}
1242
1243template<typename _Scalar, int _Options, typename _StorageIndex>
1245{
1246 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1247
1248 const Index outer = IsRowMajor ? row : col;
1249 const Index inner = IsRowMajor ? col : row;
1250
1251 if(isCompressed())
1252 {
1253 if(nonZeros()==0)
1254 {
1255 // reserve space if not already done
1256 if(m_data.allocatedSize()==0)
1257 m_data.reserve(2*m_innerSize);
1258
1259 // turn the matrix into non-compressed mode
1260 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1261 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1262
1263 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1264
1265 // pack all inner-vectors to the end of the pre-allocated space
1266 // and allocate the entire free-space to the first inner-vector
1268 for(Index j=1; j<=m_outerSize; ++j)
1269 m_outerIndex[j] = end;
1270 }
1271 else
1272 {
1273 // turn the matrix into non-compressed mode
1274 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1275 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1276 for(Index j=0; j<m_outerSize; ++j)
1277 m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1278 }
1279 }
1280
1281 // check whether we can do a fast "push back" insertion
1282 Index data_end = m_data.allocatedSize();
1283
1284 // First case: we are filling a new inner vector which is packed at the end.
1285 // We assume that all remaining inner-vectors are also empty and packed to the end.
1286 if(m_outerIndex[outer]==data_end)
1287 {
1288 eigen_internal_assert(m_innerNonZeros[outer]==0);
1289
1290 // pack previous empty inner-vectors to end of the used-space
1291 // and allocate the entire free-space to the current inner-vector.
1292 StorageIndex p = convert_index(m_data.size());
1293 Index j = outer;
1294 while(j>=0 && m_innerNonZeros[j]==0)
1295 m_outerIndex[j--] = p;
1296
1297 // push back the new element
1298 ++m_innerNonZeros[outer];
1299 m_data.append(Scalar(0), inner);
1300
1301 // check for reallocation
1302 if(data_end != m_data.allocatedSize())
1303 {
1304 // m_data has been reallocated
1305 // -> move remaining inner-vectors back to the end of the free-space
1306 // so that the entire free-space is allocated to the current inner-vector.
1307 eigen_internal_assert(data_end < m_data.allocatedSize());
1308 StorageIndex new_end = convert_index(m_data.allocatedSize());
1309 for(Index k=outer+1; k<=m_outerSize; ++k)
1310 if(m_outerIndex[k]==data_end)
1311 m_outerIndex[k] = new_end;
1312 }
1313 return m_data.value(p);
1314 }
1315
1316 // Second case: the next inner-vector is packed to the end
1317 // and the current inner-vector end matches the used-space.
1318 if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1319 {
1320 eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1321
1322 // add space for the new element
1323 ++m_innerNonZeros[outer];
1324 m_data.resize(m_data.size()+1);
1325
1326 // check for reallocation
1327 if(data_end != m_data.allocatedSize())
1328 {
1329 // m_data has been reallocated
1330 // -> move remaining inner-vectors back to the end of the free-space
1331 // so that the entire free-space is allocated to the current inner-vector.
1332 eigen_internal_assert(data_end < m_data.allocatedSize());
1333 StorageIndex new_end = convert_index(m_data.allocatedSize());
1334 for(Index k=outer+1; k<=m_outerSize; ++k)
1335 if(m_outerIndex[k]==data_end)
1336 m_outerIndex[k] = new_end;
1337 }
1338
1339 // and insert it at the right position (sorted insertion)
1340 Index startId = m_outerIndex[outer];
1341 Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1342 while ( (p > startId) && (m_data.index(p-1) > inner) )
1343 {
1344 m_data.index(p) = m_data.index(p-1);
1345 m_data.value(p) = m_data.value(p-1);
1346 --p;
1347 }
1348
1349 m_data.index(p) = convert_index(inner);
1350 return (m_data.value(p) = Scalar(0));
1351 }
1352
1353 if(m_data.size() != m_data.allocatedSize())
1354 {
1355 // make sure the matrix is compatible with random un-compressed insertion:
1356 m_data.resize(m_data.allocatedSize());
1357 this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1358 }
1359
1360 return insertUncompressed(row,col);
1361}
1362
1363template<typename _Scalar, int _Options, typename _StorageIndex>
1365{
1366 eigen_assert(!isCompressed());
1367
1368 const Index outer = IsRowMajor ? row : col;
1369 const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1370
1371 Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1372 StorageIndex innerNNZ = m_innerNonZeros[outer];
1373 if(innerNNZ>=room)
1374 {
1375 // this inner vector is full, we need to reallocate the whole buffer :(
1376 reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1377 }
1378
1379 Index startId = m_outerIndex[outer];
1380 Index p = startId + m_innerNonZeros[outer];
1381 while ( (p > startId) && (m_data.index(p-1) > inner) )
1382 {
1383 m_data.index(p) = m_data.index(p-1);
1384 m_data.value(p) = m_data.value(p-1);
1385 --p;
1386 }
1387 eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1388
1389 m_innerNonZeros[outer]++;
1390
1391 m_data.index(p) = inner;
1392 return (m_data.value(p) = Scalar(0));
1393}
1394
1395template<typename _Scalar, int _Options, typename _StorageIndex>
1397{
1398 eigen_assert(isCompressed());
1399
1400 const Index outer = IsRowMajor ? row : col;
1401 const Index inner = IsRowMajor ? col : row;
1402
1403 Index previousOuter = outer;
1404 if (m_outerIndex[outer+1]==0)
1405 {
1406 // we start a new inner vector
1407 while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1408 {
1409 m_outerIndex[previousOuter] = convert_index(m_data.size());
1410 --previousOuter;
1411 }
1412 m_outerIndex[outer+1] = m_outerIndex[outer];
1413 }
1414
1415 // here we have to handle the tricky case where the outerIndex array
1416 // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1417 // the 2nd inner vector...
1418 bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1419 && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1420
1421 std::size_t startId = m_outerIndex[outer];
1422 // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1423 std::size_t p = m_outerIndex[outer+1];
1424 ++m_outerIndex[outer+1];
1425
1426 double reallocRatio = 1;
1427 if (m_data.allocatedSize()<=m_data.size())
1428 {
1429 // if there is no preallocated memory, let's reserve a minimum of 32 elements
1430 if (m_data.size()==0)
1431 {
1432 m_data.reserve(32);
1433 }
1434 else
1435 {
1436 // we need to reallocate the data, to reduce multiple reallocations
1437 // we use a smart resize algorithm based on the current filling ratio
1438 // in addition, we use double to avoid integer overflows
1439 double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1440 reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1441 // furthermore we bound the realloc ratio to:
1442 // 1) reduce multiple minor reallocations when the matrix is almost filled
1443 // 2) avoid allocating too much memory when the matrix is almost empty
1444 reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1445 }
1446 }
1447 m_data.resize(m_data.size()+1,reallocRatio);
1448
1449 if (!isLastVec)
1450 {
1451 if (previousOuter==-1)
1452 {
1453 // oops wrong guess.
1454 // let's correct the outer offsets
1455 for (Index k=0; k<=(outer+1); ++k)
1456 m_outerIndex[k] = 0;
1457 Index k=outer+1;
1458 while(m_outerIndex[k]==0)
1459 m_outerIndex[k++] = 1;
1460 while (k<=m_outerSize && m_outerIndex[k]!=0)
1461 m_outerIndex[k++]++;
1462 p = 0;
1463 --k;
1464 k = m_outerIndex[k]-1;
1465 while (k>0)
1466 {
1467 m_data.index(k) = m_data.index(k-1);
1468 m_data.value(k) = m_data.value(k-1);
1469 k--;
1470 }
1471 }
1472 else
1473 {
1474 // we are not inserting into the last inner vec
1475 // update outer indices:
1476 Index j = outer+2;
1477 while (j<=m_outerSize && m_outerIndex[j]!=0)
1478 m_outerIndex[j++]++;
1479 --j;
1480 // shift data of last vecs:
1481 Index k = m_outerIndex[j]-1;
1482 while (k>=Index(p))
1483 {
1484 m_data.index(k) = m_data.index(k-1);
1485 m_data.value(k) = m_data.value(k-1);
1486 k--;
1487 }
1488 }
1489 }
1490
1491 while ( (p > startId) && (m_data.index(p-1) > inner) )
1492 {
1493 m_data.index(p) = m_data.index(p-1);
1494 m_data.value(p) = m_data.value(p-1);
1495 --p;
1496 }
1497
1498 m_data.index(p) = inner;
1499 return (m_data.value(p) = Scalar(0));
1500}
1501
1502namespace internal {
1503
1504template<typename _Scalar, int _Options, typename _StorageIndex>
1505struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1506 : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1507{
1511 explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1512};
1513
1514}
1515
1516} // end namespace Eigen
1517
1518#endif // EIGEN_SPARSEMATRIX_H
General-purpose arrays with easy API for coefficient-wise operations.
Definition: Array.h:47
Definition: DiagonalMatrix.h:19
EIGEN_DEVICE_FUNC const Derived & derived() const
Definition: DiagonalMatrix.h:41
Expression of a diagonal/subdiagonal/superdiagonal in a matrix.
Definition: Diagonal.h:65
A matrix or vector expression mapping an existing array of data.
Definition: Map.h:96
Sparse matrix.
Definition: MappedSparseMatrix.h:34
Expression of the product of two arbitrary matrices or vectors.
Definition: Product.h:75
Definition: ReturnByValue.h:52
EIGEN_DEVICE_FUNC void evalTo(Dest &dst) const
Definition: ReturnByValue.h:61
Definition: SparseCompressedBase.h:159
Definition: SparseCompressedBase.h:245
Common base class for sparse [compressed]-{row|column}-storage format.
Definition: SparseCompressedBase.h:38
Index nonZeros() const
Definition: SparseCompressedBase.h:56
Derived & operator=(const EigenBase< OtherDerived > &other)
Definition: SparseAssign.h:17
bool isCompressed() const
Definition: SparseCompressedBase.h:107
@ IsRowMajor
Definition: SparseMatrixBase.h:100
Definition: SparseMatrix.h:875
SingletonVector(Index i, Index v)
Definition: SparseMatrix.h:880
StorageIndex operator[](Index i) const
Definition: SparseMatrix.h:884
StorageIndex value_type
Definition: SparseMatrix.h:879
Base class of any sparse matrices or sparse expressions.
Definition: SparseMatrixBase.h:28
internal::traits< SparseMatrix< _Scalar, _Options, _StorageIndex > >::StorageIndex StorageIndex
The integer type used to store indices within a SparseMatrix.
Definition: SparseMatrixBase.h:43
const Derived & derived() const
Definition: SparseMatrixBase.h:143
Index rows() const
Definition: SparseMatrixBase.h:176
bool isRValue() const
Definition: SparseMatrixBase.h:194
internal::traits< SparseMatrix< _Scalar, _Options, _StorageIndex > >::Scalar Scalar
Definition: SparseMatrixBase.h:31
@ Flags
This stores expression Flags flags which may or may not be inherited by new expressions constructed f...
Definition: SparseMatrixBase.h:95
Derived & const_cast_derived() const
Definition: SparseMatrixBase.h:145
NumTraits< Scalar >::Real RealScalar
This is the "real scalar" type; if the Scalar type is already real numbers (e.g.
Definition: SparseMatrixBase.h:128
Index cols() const
Definition: SparseMatrixBase.h:178
static StorageIndex convert_index(const Index idx)
Definition: SparseMatrixBase.h:389
A versatible sparse matrix representation.
Definition: SparseMatrix.h:98
const Storage & data() const
Definition: SparseMatrix.h:186
SparseMatrix & operator=(const EigenBase< OtherDerived > &other)
Definition: SparseMatrix.h:786
Index m_outerSize
Definition: SparseMatrix.h:129
void prune(const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
Suppresses all nonzeros which are much smaller than reference under the tolerance epsilon.
Definition: SparseMatrix.h:510
void prune(const KeepFunc &keep=KeepFunc())
Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predica...
Definition: SparseMatrix.h:523
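A minimal usage sketch of both prune() overloads, assuming <Eigen/SparseCore> is included and A is an existing Eigen::SparseMatrix<double>; the reference value, tolerance, and predicate below are arbitrary examples:
    A.prune(1.0, 1e-12);   // drop stored entries that are much smaller than 1.0 under tolerance 1e-12
    A.prune([](Eigen::Index row, Eigen::Index col, const double& /*value*/) {
      return row >= col;   // example predicate: keep only the lower-triangular entries
    });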
Index innerSize() const
Definition: SparseMatrix.h:143
Base::InnerIterator InnerIterator
Definition: SparseMatrix.h:114
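A minimal sketch of walking the stored non-zeros with InnerIterator, assuming A is a column-major Eigen::SparseMatrix<double>:
    double sum = 0.0;
    for (int k = 0; k < A.outerSize(); ++k)                                   // one pass per column
      for (Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
        sum += it.value();   // it.row(), it.col() and it.index() are also available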
void reserve(Index reserveSize)
Preallocates space for reserveSize non-zeros.
Definition: SparseMatrix.h:264
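A minimal sketch of the scalar overload, assuming A is an Eigen::SparseMatrix<double> and 100 is just an example estimate of the total non-zero count:
    A.reserve(100);   // preallocate room for roughly 100 non-zeros before inserting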
SparseMatrix(const DiagonalBase< OtherDerived > &other)
Copy constructor with in-place evaluation.
Definition: SparseMatrix.h:728
StorageIndex * outerIndexPtr()
Definition: SparseMatrix.h:172
void reserve(const SizesType &reserveSizes, const typename SizesType::value_type &enableif=typename SizesType::value_type())
Definition: SparseMatrix.h:287
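The per-inner-vector overload takes one reserve size per column (per row in row-major mode); a minimal sketch, with 6 as an arbitrary per-column estimate:
    A.reserve(Eigen::VectorXi::Constant(A.cols(), 6));   // expect ~6 non-zeros per column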
void startVec(Index outer)
Definition: SparseMatrix.h:415
const StorageIndex * innerNonZeroPtr() const
Definition: SparseMatrix.h:177
StorageIndex * m_outerIndex
Definition: SparseMatrix.h:131
StorageIndex * m_innerNonZeros
Definition: SparseMatrix.h:132
~SparseMatrix()
Destructor.
Definition: SparseMatrix.h:842
void resizeNonZeros(Index size)
Definition: SparseMatrix.h:649
Scalar * valuePtr()
Definition: SparseMatrix.h:154
const ConstDiagonalReturnType diagonal() const
Definition: SparseMatrix.h:655
Storage & data()
Definition: SparseMatrix.h:184
SparseMatrix(const ReturnByValue< OtherDerived > &other)
Copy constructor with in-place evaluation.
Definition: SparseMatrix.h:718
Index outerSize() const
Definition: SparseMatrix.h:145
Scalar & insertBackByOuterInnerUnordered(Index outer, Index inner)
Definition: SparseMatrix.h:405
SparseMatrix(const SparseMatrixBase< OtherDerived > &other)
Constructs a sparse matrix from the sparse expression other.
Definition: SparseMatrix.h:681
void finalize()
Definition: SparseMatrix.h:425
Scalar coeff(Index row, Index col) const
Definition: SparseMatrix.h:190
void assignDiagonal(const DiagXpr diagXpr, const Func &assignFunc)
Definition: SparseMatrix.h:927
Storage m_data
Definition: SparseMatrix.h:133
internal::CompressedStorage< Scalar, StorageIndex > Storage
Definition: SparseMatrix.h:119
void sumupDuplicates()
Definition: SparseMatrix.h:451
SparseMatrix & operator=(const SparseMatrix &other)
Definition: SparseMatrix.h:759
SparseMatrix & operator=(const Product< Lhs, Rhs, AliasFreeProduct > &other)
void makeCompressed()
Turns the matrix into the compressed format.
Definition: SparseMatrix.h:467
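A minimal insertion workflow sketch, assuming a 1000 x 1000 Eigen::SparseMatrix<double> and an arbitrary estimate of 4 non-zeros per column:
    Eigen::SparseMatrix<double> A(1000, 1000);
    A.reserve(Eigen::VectorXi::Constant(1000, 4));   // estimate: ~4 non-zeros per column
    A.insert(3, 7) = 2.5;                            // random-order insertion leaves A uncompressed
    A.makeCompressed();                              // squeeze the holes; back to standard CCS for solvers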
friend std::ostream & operator<<(std::ostream &s, const SparseMatrix &m)
Definition: SparseMatrix.h:796
Index rows() const
Definition: SparseMatrix.h:138
SparseMatrix()
Default constructor yielding an empty 0 x 0 matrix.
Definition: SparseMatrix.h:664
SparseMatrix(Index rows, Index cols)
Constructs a rows x cols empty matrix.
Definition: SparseMatrix.h:672
SparseMatrix< Scalar,(Flags &~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix
Definition: SparseMatrix.h:127
Base::ReverseInnerIterator ReverseInnerIterator
Definition: SparseMatrix.h:115
void uncompress()
Turns the matrix into uncompressed mode.
Definition: SparseMatrix.h:498
const StorageIndex * innerIndexPtr() const
Definition: SparseMatrix.h:159
const StorageIndex * outerIndexPtr() const
Definition: SparseMatrix.h:168
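A minimal sketch of reading the raw compressed-column arrays through valuePtr()/innerIndexPtr()/outerIndexPtr(); this assumes A is column-major with the default int StorageIndex and has been compressed first, and process() is a hypothetical callback:
    A.makeCompressed();                        // the raw arrays are only meaningful in compressed mode
    const double* values = A.valuePtr();       // nonZeros() coefficient values
    const int*    inner  = A.innerIndexPtr();  // row index of each stored value
    const int*    outer  = A.outerIndexPtr();  // outerSize()+1 offsets: column j spans [outer[j], outer[j+1])
    for (int p = outer[0]; p < outer[1]; ++p)
      process(inner[p], values[p]);            // entries of column 0 (process() is a placeholder)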
bool isCompressed() const
Definition: SparseCompressedBase.h:107
void collapseDuplicates(DupFunctor dup_func=DupFunctor())
Definition: SparseMatrix.h:1132
void setIdentity()
Sets *this to the identity matrix.
Definition: SparseMatrix.h:749
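A minimal sketch, assuming n is the desired dimension (setIdentity() requires a square matrix):
    Eigen::SparseMatrix<double> I(n, n);
    I.setIdentity();   // one stored non-zero equal to 1.0 on each diagonal entry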
void conservativeResize(Index rows, Index cols)
Resizes the matrix to a rows x cols matrix leaving old values untouched.
Definition: SparseMatrix.h:556
Scalar & insertBackByOuterInner(Index outer, Index inner)
Definition: SparseMatrix.h:393
Base::IndexVector IndexVector
Definition: SparseMatrix.h:124
Index cols() const
Definition: SparseMatrix.h:140
EIGEN_DONT_INLINE Scalar & insertCompressed(Index row, Index col)
Definition: SparseMatrix.h:1396
SparseMatrix(const SparseSelfAdjointView< OtherDerived, UpLo > &other)
Constructs a sparse matrix from the sparse selfadjoint view other.
Definition: SparseMatrix.h:701
Scalar & insert(Index row, Index col)
Definition: SparseMatrix.h:1244
@ Options
Definition: SparseMatrix.h:121
Scalar & insertBack(Index row, Index col)
Definition: SparseMatrix.h:386
const Scalar * valuePtr() const
Definition: SparseMatrix.h:150
StorageIndex * innerNonZeroPtr()
Definition: SparseMatrix.h:181
StorageIndex * innerIndexPtr()
Definition: SparseMatrix.h:163
EIGEN_STRONG_INLINE Scalar & insertBackUncompressed(Index row, Index col)
Definition: SparseMatrix.h:894
Index m_innerSize
Definition: SparseMatrix.h:130
Scalar & insertByOuterInner(Index j, Index i)
Definition: SparseMatrix.h:460
void setFromTriplets(const InputIterators &begin, const InputIterators &end)
Fills the matrix *this with the list of triplets defined by the iterator range begin - end.
Definition: SparseMatrix.h:1108
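A minimal sketch of the triplet-filling pattern, assuming <Eigen/SparseCore> and <vector> are included; the sizes and values are arbitrary examples:
    std::vector<Eigen::Triplet<double>> triplets;
    triplets.emplace_back(0, 0, 3.0);
    triplets.emplace_back(1, 2, -1.5);
    triplets.emplace_back(0, 0, 1.0);                        // duplicate (0,0): summed by default
    Eigen::SparseMatrix<double> A(4, 4);
    A.setFromTriplets(triplets.begin(), triplets.end());     // A.coeff(0,0) == 4.0, result is compressed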
Base::ScalarVector ScalarVector
Definition: SparseMatrix.h:125
void reserveInnerVectors(const SizesType &reserveSizes)
Definition: SparseMatrix.h:299
MappedSparseMatrix< Scalar, Flags > Map
Definition: SparseMatrix.h:111
void setZero()
Removes all non-zeros but keeps the allocated memory.
Definition: SparseMatrix.h:253
void setFromTriplets(const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
The same as setFromTriplets(), but when duplicates are encountered the functor dup_func is applied.
Definition: SparseMatrix.h:1124
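A minimal sketch of the duplicate-resolution overload; here duplicates are combined with std::max instead of the default sum (requires <algorithm>, and reuses the triplets vector from the sketch above):
    A.setFromTriplets(triplets.begin(), triplets.end(),
                      [](const double& a, const double& b) { return std::max(a, b); });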
@ IsRowMajor
Definition: SparseMatrixBase.h:100
Scalar & coeffRef(Index row, Index col)
Definition: SparseMatrix.h:208
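A minimal sketch contrasting coeffRef() with coeff(), assuming A is an Eigen::SparseMatrix<double>:
    A.coeffRef(2, 5) += 1.0;    // read-write access; inserts the entry (as zero) first if it is not stored yet
    double v = A.coeff(2, 5);   // read-only access; returns 0 for entries that are not stored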
void swap(SparseMatrix &other)
Swaps the content of two sparse matrices of the same type.
Definition: SparseMatrix.h:737
Diagonal< SparseMatrix > DiagonalReturnType
Definition: SparseMatrix.h:112
EIGEN_DONT_INLINE SparseMatrix & operator=(const SparseMatrixBase< OtherDerived > &other)
void initAssignment(const Other &other)
Definition: SparseMatrix.h:858
SparseMatrix(const SparseMatrix &other)
Copy constructor (it performs a deep copy)
Definition: SparseMatrix.h:709
EIGEN_DONT_INLINE Scalar & insertUncompressed(Index row, Index col)
Definition: SparseMatrix.h:1364
DiagonalReturnType diagonal()
Definition: SparseMatrix.h:661
void resize(Index rows, Index cols)
Resizes the matrix to a rows x cols matrix and initializes it to zero.
Definition: SparseMatrix.h:626
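A minimal sketch of resizing, assuming A is an Eigen::SparseMatrix<double>; the sizes are arbitrary:
    A.resize(20, 30);               // 20 x 30 empty matrix: all previous non-zeros are discarded
    A.conservativeResize(25, 30);   // grows to 25 x 30 while keeping the existing non-zeros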
Diagonal< const SparseMatrix > ConstDiagonalReturnType
Definition: SparseMatrix.h:113
Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
Definition: SparseSelfAdjointView.h:45
A sparse vector class.
Definition: SparseVector.h:66
void reserve(Index size)
Definition: CompressedStorage.h:76
Index size() const
Definition: CompressedStorage.h:109
void moveChunk(Index from, Index to, Index chunkSize)
Definition: CompressedStorage.h:210
Index allocatedSize() const
Definition: CompressedStorage.h:110
Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue=Scalar(0)) const
Like at(), but the search is performed in the range [start,end)
Definition: CompressedStorage.h:159
Scalar & value(Index i)
Definition: CompressedStorage.h:118
const Scalar * valuePtr() const
Definition: CompressedStorage.h:113
void clear()
Definition: CompressedStorage.h:111
void append(const Scalar &v, Index i)
Definition: CompressedStorage.h:101
StorageIndex & index(Index i)
Definition: CompressedStorage.h:121
void squeeze()
Definition: CompressedStorage.h:83
void resize(Index size, double reserveSizeFactor=0)
Definition: CompressedStorage.h:89
void swap(CompressedStorage &other)
Definition: CompressedStorage.h:62
const StorageIndex * indexPtr() const
Definition: CompressedStorage.h:115
Index searchLowerIndex(Index key) const
Definition: CompressedStorage.h:125
Definition: core.h:1240
constexpr auto count() -> size_t
Definition: core.h:1204
type
Definition: core.h:575
@ ColMajor
Storage order is column major (see TopicStorageOrders).
Definition: Constants.h:319
@ RowMajor
Storage order is row major (see TopicStorageOrders).
Definition: Constants.h:321
const unsigned int LvalueBit
Means the expression has a coeffRef() method, i.e.
Definition: Constants.h:144
const unsigned int RowMajorBit
for a matrix, this means that the storage order is row-major.
Definition: Constants.h:66
const unsigned int CompressedAccessBit
Means that the underlying coefficients can be accessed through pointers to the sparse (un)compressed ...
Definition: Constants.h:191
constexpr common_t< T1, T2 > max(const T1 x, const T2 y) noexcept
Compile-time pairwise maximum function.
Definition: max.hpp:35
constexpr common_t< T1, T2 > min(const T1 x, const T2 y) noexcept
Compile-time pairwise minimum function.
Definition: min.hpp:35
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias(Dst &dst, const Src &src, const Func &func)
Definition: AssignEvaluator.h:873
EIGEN_DEVICE_FUNC IndexDest convert_index(const IndexSrc &idx)
Definition: XprHelper.h:31
EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const Scalar &x, const OtherScalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())
Definition: MathFunctions.h:1940
EIGEN_DEVICE_FUNC void throw_std_bad_alloc()
Definition: Memory.h:67
EIGEN_DEVICE_FUNC void smart_copy(const T *start, const T *end, T *target)
Definition: Memory.h:515
void set_from_triplets(const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
Definition: SparseMatrix.h:1035
EIGEN_STRONG_INLINE void swap(T &a, T &b)
Definition: Meta.h:766
static EIGEN_DEPRECATED const end_t end
Definition: IndexedViewHelper.h:181
Namespace containing all symbols from the Eigen library.
Definition: Core:141
const unsigned int NestByRefBit
Definition: Constants.h:169
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:74
const int InnerRandomAccessPattern
Definition: SparseUtil.h:48
const int Dynamic
This value means that a positive quantity (e.g., a size) is not known at compile-time,...
Definition: Constants.h:22
auto reserve(std::back_insert_iterator< Container > it, size_t n) -> checked_ptr< typename Container::value_type >
Definition: format.h:509
Definition: Eigen_Colamd.h:50
void swap(wpi::SmallPtrSet< T, N > &LHS, wpi::SmallPtrSet< T, N > &RHS)
Implement std::swap in terms of SmallPtrSet swap.
Definition: SmallPtrSet.h:512
lb
Definition: mass.h:44
The type used to identify a dense storage.
Definition: Constants.h:507
Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor Matrix...
Definition: EigenBase.h:30
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:39
EIGEN_DEVICE_FUNC Derived & derived()
Definition: EigenBase.h:46
The type used to identify a matrix expression.
Definition: Constants.h:522
Holds information about the various numeric (i.e.
Definition: NumTraits.h:233
The type used to identify a general sparse storage.
Definition: Constants.h:510
Definition: SparseMatrix.h:907
Index i
Definition: SparseMatrix.h:909
Index p
Definition: SparseMatrix.h:910
IndexPosPair(Index a_i, Index a_p)
Definition: SparseMatrix.h:908
Definition: AssignEvaluator.h:824
Definition: SparseUtil.h:144
SparseMatrix< _Scalar, _Options, _StorageIndex > SparseMatrixType
Definition: SparseMatrix.h:1509
evaluator< SparseCompressedBase< SparseMatrix< _Scalar, _Options, _StorageIndex > > > Base
Definition: SparseMatrix.h:1508
evaluator(const SparseMatrixType &mat)
Definition: SparseMatrix.h:1511
Definition: CoreEvaluators.h:91
Definition: Meta.h:148
Definition: XprHelper.h:458
Definition: XprHelper.h:417
T type
Definition: Meta.h:126
T type
Definition: Meta.h:114
Definition: BinaryFunctors.h:33
remove_reference< MatrixTypeNested >::type _MatrixTypeNested
Definition: SparseMatrix.h:68
SparseMatrix< _Scalar, _Options, _StorageIndex > MatrixType
Definition: SparseMatrix.h:66
ref_selector< MatrixType >::type MatrixTypeNested
Definition: SparseMatrix.h:67
Definition: ForwardDeclarations.h:17