SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
45 namespace internal {
46 template<typename _Scalar, int _Options, typename _StorageIndex>
47 struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48 {
49  typedef _Scalar Scalar;
50  typedef _StorageIndex StorageIndex;
51  typedef Sparse StorageKind;
52  typedef MatrixXpr XprKind;
53  enum {
54  RowsAtCompileTime = Dynamic,
55  ColsAtCompileTime = Dynamic,
56  MaxRowsAtCompileTime = Dynamic,
57  MaxColsAtCompileTime = Dynamic,
58  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59  SupportedAccessPatterns = InnerRandomAccessPattern
60  };
61 };
62 
63 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65 {
 66  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
 67  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69 
70  typedef _Scalar Scalar;
71  typedef Dense StorageKind;
72  typedef _StorageIndex StorageIndex;
73  typedef MatrixXpr XprKind;
74 
75  enum {
76  RowsAtCompileTime = Dynamic,
77  ColsAtCompileTime = 1,
78  MaxRowsAtCompileTime = Dynamic,
79  MaxColsAtCompileTime = 1,
80  Flags = LvalueBit
81  };
82 };
83 
84 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87 {
88  enum {
89  Flags = 0
90  };
91 };
92 
93 } // end namespace internal
94 
95 template<typename _Scalar, int _Options, typename _StorageIndex>
 96 class SparseMatrix
 97  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98 {
 99  typedef SparseCompressedBase<SparseMatrix> Base;
 100  using Base::convert_index;
101  friend class SparseVector<_Scalar,0,_StorageIndex>;
102  template<typename, typename, typename, typename, typename>
103  friend struct internal::Assignment;
104  public:
105  using Base::isCompressed;
106  using Base::nonZeros;
107  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
108  using Base::operator+=;
109  using Base::operator-=;
110 
 111  typedef MappedSparseMatrix<Scalar,Flags> Map;
 112  typedef Diagonal<SparseMatrix> DiagonalReturnType;
 113  typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
 114  typedef typename Base::InnerIterator InnerIterator;
 115  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
116 
117 
 118  using Base::IsRowMajor;
 119  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
 120  enum {
121  Options = _Options
122  };
123 
124  typedef typename Base::IndexVector IndexVector;
125  typedef typename Base::ScalarVector ScalarVector;
126  protected:
 127  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?0:RowMajorBit),StorageIndex> TransposedSparseMatrix;
128 
129  Index m_outerSize;
130  Index m_innerSize;
131  StorageIndex* m_outerIndex;
132  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
133  Storage m_data;
134 
135  public:
136 
138  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
140  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
141 
143  inline Index innerSize() const { return m_innerSize; }
145  inline Index outerSize() const { return m_outerSize; }
146 
150  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
154  inline Scalar* valuePtr() { return m_data.valuePtr(); }
155 
159  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
163  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
164 
168  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
172  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
173 
177  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
181  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
182 
184  inline Storage& data() { return m_data; }
186  inline const Storage& data() const { return m_data; }
187 
190  inline Scalar coeff(Index row, Index col) const
191  {
192  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
193 
194  const Index outer = IsRowMajor ? row : col;
195  const Index inner = IsRowMajor ? col : row;
196  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
197  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
198  }
199 
208  inline Scalar& coeffRef(Index row, Index col)
209  {
210  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
211 
212  const Index outer = IsRowMajor ? row : col;
213  const Index inner = IsRowMajor ? col : row;
214 
215  Index start = m_outerIndex[outer];
216  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
217  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
218  if(end<=start)
219  return insert(row,col);
220  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
221  if((p<end) && (m_data.index(p)==inner))
222  return m_data.value(p);
223  else
224  return insert(row,col);
225  }
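  // Illustrative usage sketch (not part of the original header): coeff() is a read-only
  // lookup that returns 0 for entries that are not stored, whereas coeffRef() inserts a
  // zero entry on the fly when needed, which can be expensive without a prior reserve().
  //   Eigen::SparseMatrix<double> A(100, 100);
  //   double v = A.coeff(3, 7);   // 0.0 if (3,7) is not stored, no insertion
  //   A.coeffRef(3, 7) += 1.0;    // inserts (3,7) first if it is absent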
226 
242  Scalar& insert(Index row, Index col);
243 
244  public:
245 
253  inline void setZero()
254  {
255  m_data.clear();
256  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
257  if(m_innerNonZeros)
258  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
259  }
260 
264  inline void reserve(Index reserveSize)
265  {
266  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
267  m_data.reserve(reserveSize);
268  }
269 
270  #ifdef EIGEN_PARSED_BY_DOXYGEN
271 
283  template<class SizesType>
284  inline void reserve(const SizesType& reserveSizes);
285  #else
286  template<class SizesType>
287  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
288  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
289  typename
290  #endif
291  SizesType::value_type())
292  {
293  EIGEN_UNUSED_VARIABLE(enableif);
294  reserveInnerVectors(reserveSizes);
295  }
296  #endif // EIGEN_PARSED_BY_DOXYGEN
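  // Illustrative reservation pattern (assumed typical usage, not part of the original
  // header): passing a per-inner-vector size vector avoids repeated reallocations when
  // filling with insert().
  //   Eigen::SparseMatrix<double> A(n, n);              // column-major by default
  //   A.reserve(Eigen::VectorXi::Constant(n, 6));       // expect ~6 nonzeros per column
  //   // ... A.insert(i, j) = v; for each entry ...
  //   A.makeCompressed();                               // back to compressed storage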
297  protected:
298  template<class SizesType>
299  inline void reserveInnerVectors(const SizesType& reserveSizes)
300  {
301  if(isCompressed())
302  {
303  Index totalReserveSize = 0;
304  // turn the matrix into non-compressed mode
305  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
306  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
307 
308  // temporarily use m_innerSizes to hold the new starting points.
309  StorageIndex* newOuterIndex = m_innerNonZeros;
310 
311  StorageIndex count = 0;
312  for(Index j=0; j<m_outerSize; ++j)
313  {
314  newOuterIndex[j] = count;
315  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
316  totalReserveSize += reserveSizes[j];
317  }
318  m_data.reserve(totalReserveSize);
319  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
320  for(Index j=m_outerSize-1; j>=0; --j)
321  {
322  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
323  for(Index i=innerNNZ-1; i>=0; --i)
324  {
325  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
326  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
327  }
328  previousOuterIndex = m_outerIndex[j];
329  m_outerIndex[j] = newOuterIndex[j];
330  m_innerNonZeros[j] = innerNNZ;
331  }
332  if(m_outerSize>0)
333  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
334 
335  m_data.resize(m_outerIndex[m_outerSize]);
336  }
337  else
338  {
339  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
340  if (!newOuterIndex) internal::throw_std_bad_alloc();
341 
342  StorageIndex count = 0;
343  for(Index j=0; j<m_outerSize; ++j)
344  {
345  newOuterIndex[j] = count;
346  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
347  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
348  count += toReserve + m_innerNonZeros[j];
349  }
350  newOuterIndex[m_outerSize] = count;
351 
352  m_data.resize(count);
353  for(Index j=m_outerSize-1; j>=0; --j)
354  {
355  Index offset = newOuterIndex[j] - m_outerIndex[j];
356  if(offset>0)
357  {
358  StorageIndex innerNNZ = m_innerNonZeros[j];
359  for(Index i=innerNNZ-1; i>=0; --i)
360  {
361  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
362  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
363  }
364  }
365  }
366 
367  std::swap(m_outerIndex, newOuterIndex);
368  std::free(newOuterIndex);
369  }
370 
371  }
372  public:
373 
374  //--- low level purely coherent filling ---
375 
386  inline Scalar& insertBack(Index row, Index col)
387  {
388  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
389  }
390 
393  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
394  {
395  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397  Index p = m_outerIndex[outer+1];
398  ++m_outerIndex[outer+1];
399  m_data.append(Scalar(0), inner);
400  return m_data.value(p);
401  }
402 
405  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
406  {
407  Index p = m_outerIndex[outer+1];
408  ++m_outerIndex[outer+1];
409  m_data.append(Scalar(0), inner);
410  return m_data.value(p);
411  }
412 
415  inline void startVec(Index outer)
416  {
417  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419  m_outerIndex[outer+1] = m_outerIndex[outer];
420  }
421 
425  inline void finalize()
426  {
427  if(isCompressed())
428  {
429  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430  Index i = m_outerSize;
431  // find the last filled column
432  while (i>=0 && m_outerIndex[i]==0)
433  --i;
434  ++i;
435  while (i<=m_outerSize)
436  {
437  m_outerIndex[i] = size;
438  ++i;
439  }
440  }
441  }
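  // Illustrative low-level filling sketch (assumption: entries are appended with strictly
  // increasing inner indices within each outer vector, and outer vectors are visited in order):
  //   Eigen::SparseMatrix<double> A(rows, cols);
  //   A.reserve(estimated_nnz);
  //   for (Eigen::Index j = 0; j < A.outerSize(); ++j) {   // columns for the default ColMajor layout
  //     A.startVec(j);
  //     // for each structurally nonzero row i of column j, with i increasing:
  //     //   A.insertBack(i, j) = value;
  //   }
  //   A.finalize();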
442 
443  //---
444 
445  template<typename InputIterators>
446  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
447 
448  template<typename InputIterators,typename DupFunctor>
449  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
450 
451  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
452 
453  template<typename DupFunctor>
454  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
455 
456  //---
457 
460  Scalar& insertByOuterInner(Index j, Index i)
461  {
462  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
463  }
464 
 467  void makeCompressed()
 468  {
469  if(isCompressed())
470  return;
471 
472  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
473 
474  Index oldStart = m_outerIndex[1];
475  m_outerIndex[1] = m_innerNonZeros[0];
476  for(Index j=1; j<m_outerSize; ++j)
477  {
478  Index nextOldStart = m_outerIndex[j+1];
479  Index offset = oldStart - m_outerIndex[j];
480  if(offset>0)
481  {
482  for(Index k=0; k<m_innerNonZeros[j]; ++k)
483  {
484  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486  }
487  }
488  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
489  oldStart = nextOldStart;
490  }
491  std::free(m_innerNonZeros);
492  m_innerNonZeros = 0;
493  m_data.resize(m_outerIndex[m_outerSize]);
494  m_data.squeeze();
495  }
496 
498  void uncompress()
499  {
500  if(m_innerNonZeros != 0)
501  return;
502  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503  for (Index i = 0; i < m_outerSize; i++)
504  {
505  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
506  }
507  }
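  // Illustrative note (not part of the original header): random insert() calls switch the
  // matrix to the uncompressed mode that keeps per-vector free room; makeCompressed()
  // squeezes that room out again, which many solvers and export routines expect.
  //   A.insert(i, j) = v;    // may leave the matrix in uncompressed mode
  //   A.makeCompressed();    // standard CSC/CSR layout, m_innerNonZeros released
  //   A.uncompress();        // re-creates the per-vector nonzero counts if needed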
508 
510  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511  {
512  prune(default_prunning_func(reference,epsilon));
513  }
514 
522  template<typename KeepFunc>
523  void prune(const KeepFunc& keep = KeepFunc())
524  {
525  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
526  makeCompressed();
527 
528  StorageIndex k = 0;
529  for(Index j=0; j<m_outerSize; ++j)
530  {
531  Index previousStart = m_outerIndex[j];
532  m_outerIndex[j] = k;
533  Index end = m_outerIndex[j+1];
534  for(Index i=previousStart; i<end; ++i)
535  {
536  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537  {
538  m_data.value(k) = m_data.value(i);
539  m_data.index(k) = m_data.index(i);
540  ++k;
541  }
542  }
543  }
544  m_outerIndex[m_outerSize] = k;
545  m_data.resize(k,0);
546  }
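  // Illustrative pruning sketch (assumes Scalar == double, not part of the original header):
  // the first overload removes entries that are much smaller than the reference value, the
  // generic overload keeps exactly the entries for which the predicate returns true.
  //   A.prune(1.0);                                               // drop near-zero entries
  //   A.prune([](Eigen::Index, Eigen::Index, const double& v) {   // keep strictly positive entries
  //     return v > 0.0;
  //   });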
547 
 556  void conservativeResize(Index rows, Index cols)
 557  {
558  // No change
559  if (this->rows() == rows && this->cols() == cols) return;
560 
561  // If one dimension is null, then there is nothing to be preserved
562  if(rows==0 || cols==0) return resize(rows,cols);
563 
564  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567 
568  // Deals with inner non zeros
569  if (m_innerNonZeros)
570  {
571  // Resize m_innerNonZeros
572  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574  m_innerNonZeros = newInnerNonZeros;
575 
576  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577  m_innerNonZeros[i] = 0;
578  }
579  else if (innerChange < 0)
580  {
581  // Inner size decreased: allocate a new m_innerNonZeros
582  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
583  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
584  for(Index i = 0; i < m_outerSize; i++)
585  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
586  }
587 
588  // Change the m_innerNonZeros in case of a decrease of inner size
589  if (m_innerNonZeros && innerChange < 0)
590  {
591  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
592  {
593  StorageIndex &n = m_innerNonZeros[i];
594  StorageIndex start = m_outerIndex[i];
595  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
596  }
597  }
598 
599  m_innerSize = newInnerSize;
600 
601  // Re-allocate outer index structure if necessary
602  if (outerChange == 0)
603  return;
604 
605  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
606  if (!newOuterIndex) internal::throw_std_bad_alloc();
607  m_outerIndex = newOuterIndex;
608  if (outerChange > 0)
609  {
610  StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
611  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
612  m_outerIndex[i] = lastIdx;
613  }
614  m_outerSize += outerChange;
615  }
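  // Illustrative sketch (not part of the original header): conservativeResize() keeps the
  // entries that still fit into the new dimensions, whereas resize() discards everything.
  //   A.conservativeResize(A.rows() + 1, A.cols());   // append an empty row, keep entries
  //   A.resize(m, n);                                 // empty m-by-n matrix afterwards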
616 
 624  void resize(Index rows, Index cols)
 625  {
626  const Index outerSize = IsRowMajor ? rows : cols;
627  m_innerSize = IsRowMajor ? cols : rows;
628  m_data.clear();
629  if (m_outerSize != outerSize || m_outerSize==0)
630  {
631  std::free(m_outerIndex);
632  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
633  if (!m_outerIndex) internal::throw_std_bad_alloc();
634 
635  m_outerSize = outerSize;
636  }
637  if(m_innerNonZeros)
638  {
639  std::free(m_innerNonZeros);
640  m_innerNonZeros = 0;
641  }
642  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
643  }
644 
647  void resizeNonZeros(Index size)
648  {
649  m_data.resize(size);
650  }
651 
 653  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(derived()); }
 654 
 659  DiagonalReturnType diagonal() { return DiagonalReturnType(derived()); }
 660 
662  inline SparseMatrix()
663  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
664  {
665  check_template_parameters();
666  resize(0, 0);
667  }
668 
 670  inline SparseMatrix(Index rows, Index cols)
 671  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
672  {
673  check_template_parameters();
674  resize(rows, cols);
675  }
676 
678  template<typename OtherDerived>
 679  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
 680  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
681  {
 682  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
 683  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
684  check_template_parameters();
685  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
686  if (needToTranspose)
687  *this = other.derived();
688  else
689  {
690  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
691  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
692  #endif
693  internal::call_assignment_no_alias(*this, other.derived());
694  }
695  }
696 
698  template<typename OtherDerived, unsigned int UpLo>
 699  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
 700  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
701  {
702  check_template_parameters();
703  Base::operator=(other);
704  }
705 
707  inline SparseMatrix(const SparseMatrix& other)
708  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
709  {
710  check_template_parameters();
711  *this = other.derived();
712  }
713 
715  template<typename OtherDerived>
 716  SparseMatrix(const ReturnByValue<OtherDerived>& other)
 717  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
718  {
719  check_template_parameters();
720  initAssignment(other);
721  other.evalTo(*this);
722  }
723 
725  template<typename OtherDerived>
 726  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
 727  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
728  {
729  check_template_parameters();
730  *this = other.derived();
731  }
732 
735  inline void swap(SparseMatrix& other)
736  {
737  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
738  std::swap(m_outerIndex, other.m_outerIndex);
739  std::swap(m_innerSize, other.m_innerSize);
740  std::swap(m_outerSize, other.m_outerSize);
741  std::swap(m_innerNonZeros, other.m_innerNonZeros);
742  m_data.swap(other.m_data);
743  }
744 
747  inline void setIdentity()
748  {
749  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
750  this->m_data.resize(rows());
751  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
752  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
753  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
754  std::free(m_innerNonZeros);
755  m_innerNonZeros = 0;
756  }
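  // Illustrative sketch (not part of the original header): setIdentity() produces a
  // compressed matrix with exactly rows() stored ones on the diagonal.
  //   Eigen::SparseMatrix<double> I(n, n);
  //   I.setIdentity();                 // I.nonZeros() == n, I.isCompressed() == true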
757  inline SparseMatrix& operator=(const SparseMatrix& other)
758  {
759  if (other.isRValue())
760  {
761  swap(other.const_cast_derived());
762  }
763  else if(this!=&other)
764  {
765  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
766  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
767  #endif
768  initAssignment(other);
769  if(other.isCompressed())
770  {
771  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
772  m_data = other.m_data;
773  }
774  else
775  {
776  Base::operator=(other);
777  }
778  }
779  return *this;
780  }
781 
782 #ifndef EIGEN_PARSED_BY_DOXYGEN
783  template<typename OtherDerived>
784  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
785  { return Base::operator=(other.derived()); }
786 
787  template<typename Lhs, typename Rhs>
788  inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
789 #endif // EIGEN_PARSED_BY_DOXYGEN
790 
791  template<typename OtherDerived>
792  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
793 
794  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
795  {
796  EIGEN_DBG_SPARSE(
797  s << "Nonzero entries:\n";
798  if(m.isCompressed())
799  {
800  for (Index i=0; i<m.nonZeros(); ++i)
801  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
802  }
803  else
804  {
805  for (Index i=0; i<m.outerSize(); ++i)
806  {
807  Index p = m.m_outerIndex[i];
808  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
809  Index k=p;
810  for (; k<pe; ++k) {
811  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
812  }
813  for (; k<m.m_outerIndex[i+1]; ++k) {
814  s << "(_,_) ";
815  }
816  }
817  }
818  s << std::endl;
819  s << std::endl;
820  s << "Outer pointers:\n";
821  for (Index i=0; i<m.outerSize(); ++i) {
822  s << m.m_outerIndex[i] << " ";
823  }
824  s << " $" << std::endl;
825  if(!m.isCompressed())
826  {
827  s << "Inner non zeros:\n";
828  for (Index i=0; i<m.outerSize(); ++i) {
829  s << m.m_innerNonZeros[i] << " ";
830  }
831  s << " $" << std::endl;
832  }
833  s << std::endl;
834  );
835  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
836  return s;
837  }
838 
840  inline ~SparseMatrix()
841  {
842  std::free(m_outerIndex);
843  std::free(m_innerNonZeros);
844  }
845 
847  Scalar sum() const;
848 
849 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
850 # include EIGEN_SPARSEMATRIX_PLUGIN
851 # endif
852 
853 protected:
854 
855  template<typename Other>
856  void initAssignment(const Other& other)
857  {
858  resize(other.rows(), other.cols());
859  if(m_innerNonZeros)
860  {
861  std::free(m_innerNonZeros);
862  m_innerNonZeros = 0;
863  }
864  }
865 
868  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
869 
 872  class SingletonVector
 873  {
874  StorageIndex m_index;
875  StorageIndex m_value;
876  public:
877  typedef StorageIndex value_type;
 878  SingletonVector(Index i, Index v)
 879  : m_index(convert_index(i)), m_value(convert_index(v))
880  {}
881 
882  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
883  };
884 
887  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
888 
889 public:
892  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
893  {
894  const Index outer = IsRowMajor ? row : col;
895  const Index inner = IsRowMajor ? col : row;
896 
897  eigen_assert(!isCompressed());
898  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
899 
900  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
901  m_data.index(p) = convert_index(inner);
902  return (m_data.value(p) = Scalar(0));
903  }
904 protected:
905  struct IndexPosPair {
906  IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
907  Index i;
908  Index p;
909  };
910 
924  template<typename DiagXpr, typename Func>
925  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
926  {
927  Index n = diagXpr.size();
928 
929  const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
930  if(overwrite)
931  {
932  if((this->rows()!=n) || (this->cols()!=n))
933  this->resize(n, n);
934  }
935 
936  if(m_data.size()==0 || overwrite)
937  {
938  typedef Array<StorageIndex,Dynamic,1> ArrayXI;
939  this->makeCompressed();
940  this->resizeNonZeros(n);
941  Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
942  Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
943  Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
944  values.setZero();
945  internal::call_assignment_no_alias(values, diagXpr, assignFunc);
946  }
947  else
948  {
949  bool isComp = isCompressed();
950  internal::evaluator<DiagXpr> diaEval(diagXpr);
951  std::vector<IndexPosPair> newEntries;
952 
953  // 1 - try in-place update and record insertion failures
954  for(Index i = 0; i<n; ++i)
955  {
956  internal::LowerBoundIndex lb = this->lower_bound(i,i);
957  Index p = lb.value;
958  if(lb.found)
959  {
960  // the coeff already exists
961  assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
962  }
963  else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
964  {
965  // non compressed mode with local room for inserting one element
966  m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
967  m_innerNonZeros[i]++;
968  m_data.value(p) = Scalar(0);
969  m_data.index(p) = StorageIndex(i);
970  assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
971  }
972  else
973  {
974  // defer insertion
975  newEntries.push_back(IndexPosPair(i,p));
976  }
977  }
978  // 2 - insert deferred entries
979  Index n_entries = Index(newEntries.size());
980  if(n_entries>0)
981  {
982  Storage newData(m_data.size()+n_entries);
983  Index prev_p = 0;
984  Index prev_i = 0;
985  for(Index k=0; k<n_entries;++k)
986  {
987  Index i = newEntries[k].i;
988  Index p = newEntries[k].p;
989  internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
990  internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
991  for(Index j=prev_i;j<i;++j)
992  m_outerIndex[j+1] += k;
993  if(!isComp)
994  m_innerNonZeros[i]++;
995  prev_p = p;
996  prev_i = i;
997  newData.value(p+k) = Scalar(0);
998  newData.index(p+k) = StorageIndex(i);
999  assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1000  }
1001  {
1002  internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1003  internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1004  for(Index j=prev_i+1;j<=m_outerSize;++j)
1005  m_outerIndex[j] += n_entries;
1006  }
1007  m_data.swap(newData);
1008  }
1009  }
1010  }
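  // Illustrative sketch (assumed public-facing usage of this helper, not part of the
  // original header): assigning a dense vector to diagonal() funnels into assignDiagonal(),
  // inserting missing diagonal entries on demand.
  //   Eigen::VectorXd d = Eigen::VectorXd::Ones(A.rows());
  //   A.diagonal() = d;     // overwrite the diagonal, resizing/inserting as needed
  //   A.diagonal() += d;    // accumulate into existing or newly inserted entries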
1011 
1012 private:
1013  static void check_template_parameters()
1014  {
1015  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1016  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1017  }
1018 
1019  struct default_prunning_func {
1020  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1021  inline bool operator() (const Index&, const Index&, const Scalar& value) const
1022  {
1023  return !internal::isMuchSmallerThan(value, reference, epsilon);
1024  }
1025  Scalar reference;
1026  RealScalar epsilon;
1027  };
1028 };
1029 
1030 namespace internal {
1031 
1032 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1033 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1034 {
1035  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1036  typedef typename SparseMatrixType::Scalar Scalar;
1037  typedef typename SparseMatrixType::StorageIndex StorageIndex;
1038  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
1039 
1040  if(begin!=end)
1041  {
1042  // pass 1: count the nnz per inner-vector
1043  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1044  wi.setZero();
1045  for(InputIterator it(begin); it!=end; ++it)
1046  {
1047  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1048  wi(IsRowMajor ? it->col() : it->row())++;
1049  }
1050 
1051  // pass 2: insert all the elements into trMat
1052  trMat.reserve(wi);
1053  for(InputIterator it(begin); it!=end; ++it)
1054  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1055 
1056  // pass 3:
1057  trMat.collapseDuplicates(dup_func);
1058  }
1059 
1060  // pass 4: transposed copy -> implicit sorting
1061  mat = trMat;
1062 }
1063 
1064 }
1065 
1066 
1104 template<typename Scalar, int _Options, typename _StorageIndex>
1105 template<typename InputIterators>
1106 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1107 {
1108  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1109 }
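// Illustrative usage sketch (assumed typical pattern, not part of the original header):
//   std::vector<Eigen::Triplet<double>> triplets;
//   triplets.reserve(estimated_nnz);
//   triplets.emplace_back(i, j, v);      // duplicates are allowed here
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.setFromTriplets(triplets.begin(), triplets.end());   // duplicated (i,j) are summed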
1110 
1120 template<typename Scalar, int _Options, typename _StorageIndex>
1121 template<typename InputIterators,typename DupFunctor>
1122 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1123 {
1124  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1125 }
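// Illustrative sketch (not part of the original header): the functor decides how to combine
// duplicated (i,j) entries; here the larger value wins instead of the default sum.
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double& a, const double& b) { return std::max(a, b); });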
1126 
1128 template<typename Scalar, int _Options, typename _StorageIndex>
1129 template<typename DupFunctor>
 1130 void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
 1131 {
1132  eigen_assert(!isCompressed());
1133  // TODO, in practice we should be able to use m_innerNonZeros for that task
1134  IndexVector wi(innerSize());
1135  wi.fill(-1);
1136  StorageIndex count = 0;
1137  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1138  for(Index j=0; j<outerSize(); ++j)
1139  {
1140  StorageIndex start = count;
1141  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1142  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1143  {
1144  Index i = m_data.index(k);
1145  if(wi(i)>=start)
1146  {
1147  // we already meet this entry => accumulate it
1148  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1149  }
1150  else
1151  {
1152  m_data.value(count) = m_data.value(k);
1153  m_data.index(count) = m_data.index(k);
1154  wi(i) = count;
1155  ++count;
1156  }
1157  }
1158  m_outerIndex[j] = start;
1159  }
1160  m_outerIndex[m_outerSize] = count;
1161 
1162  // turn the matrix into compressed form
1163  std::free(m_innerNonZeros);
1164  m_innerNonZeros = 0;
1165  m_data.resize(m_outerIndex[m_outerSize]);
1166 }
1167 
1168 template<typename Scalar, int _Options, typename _StorageIndex>
1169 template<typename OtherDerived>
1170 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1171 {
1172  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1173  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1174 
1175  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1176  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1177  #endif
1178 
1179  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1180  if (needToTranspose)
1181  {
1182  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1183  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1184  #endif
1185  // two passes algorithm:
1186  // 1 - compute the number of coeffs per dest inner vector
1187  // 2 - do the actual copy/eval
1188  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1189  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1190  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1191  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1192  OtherCopy otherCopy(other.derived());
1193  OtherCopyEval otherCopyEval(otherCopy);
1194 
1195  SparseMatrix dest(other.rows(),other.cols());
1196  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1197 
1198  // pass 1
1199  // FIXME the above copy could be merged with that pass
1200  for (Index j=0; j<otherCopy.outerSize(); ++j)
1201  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1202  ++dest.m_outerIndex[it.index()];
1203 
1204  // prefix sum
1205  StorageIndex count = 0;
1206  IndexVector positions(dest.outerSize());
1207  for (Index j=0; j<dest.outerSize(); ++j)
1208  {
1209  StorageIndex tmp = dest.m_outerIndex[j];
1210  dest.m_outerIndex[j] = count;
1211  positions[j] = count;
1212  count += tmp;
1213  }
1214  dest.m_outerIndex[dest.outerSize()] = count;
1215  // alloc
1216  dest.m_data.resize(count);
1217  // pass 2
1218  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1219  {
1220  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1221  {
1222  Index pos = positions[it.index()]++;
1223  dest.m_data.index(pos) = j;
1224  dest.m_data.value(pos) = it.value();
1225  }
1226  }
1227  this->swap(dest);
1228  return *this;
1229  }
1230  else
1231  {
1232  if(other.isRValue())
1233  {
1234  initAssignment(other.derived());
1235  }
1236  // there is no special optimization
1237  return Base::operator=(other.derived());
1238  }
1239 }
1240 
1241 template<typename _Scalar, int _Options, typename _StorageIndex>
 1242 typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
 1243 {
1244  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1245 
1246  const Index outer = IsRowMajor ? row : col;
1247  const Index inner = IsRowMajor ? col : row;
1248 
1249  if(isCompressed())
1250  {
1251  if(nonZeros()==0)
1252  {
1253  // reserve space if not already done
1254  if(m_data.allocatedSize()==0)
1255  m_data.reserve(2*m_innerSize);
1256 
1257  // turn the matrix into non-compressed mode
1258  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1259  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1260 
1261  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1262 
1263  // pack all inner-vectors to the end of the pre-allocated space
1264  // and allocate the entire free-space to the first inner-vector
1265  StorageIndex end = convert_index(m_data.allocatedSize());
1266  for(Index j=1; j<=m_outerSize; ++j)
1267  m_outerIndex[j] = end;
1268  }
1269  else
1270  {
1271  // turn the matrix into non-compressed mode
1272  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1273  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1274  for(Index j=0; j<m_outerSize; ++j)
1275  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1276  }
1277  }
1278 
1279  // check whether we can do a fast "push back" insertion
1280  Index data_end = m_data.allocatedSize();
1281 
1282  // First case: we are filling a new inner vector which is packed at the end.
1283  // We assume that all remaining inner-vectors are also empty and packed to the end.
1284  if(m_outerIndex[outer]==data_end)
1285  {
1286  eigen_internal_assert(m_innerNonZeros[outer]==0);
1287 
1288  // pack previous empty inner-vectors to end of the used-space
1289  // and allocate the entire free-space to the current inner-vector.
1290  StorageIndex p = convert_index(m_data.size());
1291  Index j = outer;
1292  while(j>=0 && m_innerNonZeros[j]==0)
1293  m_outerIndex[j--] = p;
1294 
1295  // push back the new element
1296  ++m_innerNonZeros[outer];
1297  m_data.append(Scalar(0), inner);
1298 
1299  // check for reallocation
1300  if(data_end != m_data.allocatedSize())
1301  {
1302  // m_data has been reallocated
1303  // -> move remaining inner-vectors back to the end of the free-space
1304  // so that the entire free-space is allocated to the current inner-vector.
1305  eigen_internal_assert(data_end < m_data.allocatedSize());
1306  StorageIndex new_end = convert_index(m_data.allocatedSize());
1307  for(Index k=outer+1; k<=m_outerSize; ++k)
1308  if(m_outerIndex[k]==data_end)
1309  m_outerIndex[k] = new_end;
1310  }
1311  return m_data.value(p);
1312  }
1313 
1314  // Second case: the next inner-vector is packed to the end
1315  // and the current inner-vector end match the used-space.
1316  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1317  {
1318  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1319 
1320  // add space for the new element
1321  ++m_innerNonZeros[outer];
1322  m_data.resize(m_data.size()+1);
1323 
1324  // check for reallocation
1325  if(data_end != m_data.allocatedSize())
1326  {
1327  // m_data has been reallocated
1328  // -> move remaining inner-vectors back to the end of the free-space
1329  // so that the entire free-space is allocated to the current inner-vector.
1330  eigen_internal_assert(data_end < m_data.allocatedSize());
1331  StorageIndex new_end = convert_index(m_data.allocatedSize());
1332  for(Index k=outer+1; k<=m_outerSize; ++k)
1333  if(m_outerIndex[k]==data_end)
1334  m_outerIndex[k] = new_end;
1335  }
1336 
1337  // and insert it at the right position (sorted insertion)
1338  Index startId = m_outerIndex[outer];
1339  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1340  while ( (p > startId) && (m_data.index(p-1) > inner) )
1341  {
1342  m_data.index(p) = m_data.index(p-1);
1343  m_data.value(p) = m_data.value(p-1);
1344  --p;
1345  }
1346 
1347  m_data.index(p) = convert_index(inner);
1348  return (m_data.value(p) = Scalar(0));
1349  }
1350 
1351  if(m_data.size() != m_data.allocatedSize())
1352  {
1353  // make sure the matrix is compatible to random un-compressed insertion:
1354  m_data.resize(m_data.allocatedSize());
1355  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1356  }
1357 
1358  return insertUncompressed(row,col);
1359 }
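// Illustrative pattern (not part of the original header): insert() stays cheap only when
// enough room has been reserved per inner vector; otherwise the fallback paths above may
// move large parts of the buffer on every insertion.
//   A.reserve(Eigen::VectorXi::Constant(A.outerSize(), 4));   // room for ~4 entries each
//   A.insert(i, j) = v;     // (i,j) must not exist yet; use coeffRef() when unsure
//   A.makeCompressed();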
1360 
1361 template<typename _Scalar, int _Options, typename _StorageIndex>
 1362 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
 1363 {
1364  eigen_assert(!isCompressed());
1365 
1366  const Index outer = IsRowMajor ? row : col;
1367  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1368 
1369  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1370  StorageIndex innerNNZ = m_innerNonZeros[outer];
1371  if(innerNNZ>=room)
1372  {
1373  // this inner vector is full, we need to reallocate the whole buffer :(
1374  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1375  }
1376 
1377  Index startId = m_outerIndex[outer];
1378  Index p = startId + m_innerNonZeros[outer];
1379  while ( (p > startId) && (m_data.index(p-1) > inner) )
1380  {
1381  m_data.index(p) = m_data.index(p-1);
1382  m_data.value(p) = m_data.value(p-1);
1383  --p;
1384  }
1385  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1386 
1387  m_innerNonZeros[outer]++;
1388 
1389  m_data.index(p) = inner;
1390  return (m_data.value(p) = Scalar(0));
1391 }
1392 
1393 template<typename _Scalar, int _Options, typename _StorageIndex>
1394 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1395 {
1396  eigen_assert(isCompressed());
1397 
1398  const Index outer = IsRowMajor ? row : col;
1399  const Index inner = IsRowMajor ? col : row;
1400 
1401  Index previousOuter = outer;
1402  if (m_outerIndex[outer+1]==0)
1403  {
1404  // we start a new inner vector
1405  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1406  {
1407  m_outerIndex[previousOuter] = convert_index(m_data.size());
1408  --previousOuter;
1409  }
1410  m_outerIndex[outer+1] = m_outerIndex[outer];
1411  }
1412 
1413  // here we have to handle the tricky case where the outerIndex array
1414  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1415  // the 2nd inner vector...
1416  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1417  && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1418 
1419  std::size_t startId = m_outerIndex[outer];
1420  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1421  std::size_t p = m_outerIndex[outer+1];
1422  ++m_outerIndex[outer+1];
1423 
1424  double reallocRatio = 1;
1425  if (m_data.allocatedSize()<=m_data.size())
1426  {
1427  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1428  if (m_data.size()==0)
1429  {
1430  m_data.reserve(32);
1431  }
1432  else
1433  {
1434  // we need to reallocate the data, to reduce multiple reallocations
1435  // we use a smart resize algorithm based on the current filling ratio
1436  // in addition, we use double to avoid integers overflows
1437  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1438  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1439  // furthermore we bound the realloc ratio to:
1440  // 1) reduce multiple minor realloc when the matrix is almost filled
1441  // 2) avoid to allocate too much memory when the matrix is almost empty
1442  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1443  }
1444  }
1445  m_data.resize(m_data.size()+1,reallocRatio);
1446 
1447  if (!isLastVec)
1448  {
1449  if (previousOuter==-1)
1450  {
1451  // oops wrong guess.
1452  // let's correct the outer offsets
1453  for (Index k=0; k<=(outer+1); ++k)
1454  m_outerIndex[k] = 0;
1455  Index k=outer+1;
1456  while(m_outerIndex[k]==0)
1457  m_outerIndex[k++] = 1;
1458  while (k<=m_outerSize && m_outerIndex[k]!=0)
1459  m_outerIndex[k++]++;
1460  p = 0;
1461  --k;
1462  k = m_outerIndex[k]-1;
1463  while (k>0)
1464  {
1465  m_data.index(k) = m_data.index(k-1);
1466  m_data.value(k) = m_data.value(k-1);
1467  k--;
1468  }
1469  }
1470  else
1471  {
1472  // we are not inserting into the last inner vec
1473  // update outer indices:
1474  Index j = outer+2;
1475  while (j<=m_outerSize && m_outerIndex[j]!=0)
1476  m_outerIndex[j++]++;
1477  --j;
1478  // shift data of last vecs:
1479  Index k = m_outerIndex[j]-1;
1480  while (k>=Index(p))
1481  {
1482  m_data.index(k) = m_data.index(k-1);
1483  m_data.value(k) = m_data.value(k-1);
1484  k--;
1485  }
1486  }
1487  }
1488 
1489  while ( (p > startId) && (m_data.index(p-1) > inner) )
1490  {
1491  m_data.index(p) = m_data.index(p-1);
1492  m_data.value(p) = m_data.value(p-1);
1493  --p;
1494  }
1495 
1496  m_data.index(p) = inner;
1497  return (m_data.value(p) = Scalar(0));
1498 }
1499 
1500 namespace internal {
1501 
1502 template<typename _Scalar, int _Options, typename _StorageIndex>
1503 struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1504  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1505 {
 1506  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
 1507  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
 1508  evaluator() : Base() {}
1509  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1510 };
1511 
1512 }
1513 
1514 } // end namespace Eigen
1515 
1516 #endif // EIGEN_SPARSEMATRIX_H