#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H
template<typename _Scalar, int _Options, typename _StorageIndex>
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
    SupportedAccessPatterns = InnerRandomAccessPattern
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
    ColsAtCompileTime = 1,
    MaxColsAtCompileTime = 1,
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
template<typename _Scalar, int _Options, typename _StorageIndex>
    using Base::convert_index;
    template<typename, typename, typename, typename, typename>
    using Base::operator+=;
    using Base::operator-=;
    using Base::IsRowMajor;
    typedef typename Base::ScalarVector ScalarVector;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    inline Scalar* valuePtr() { return m_data.valuePtr(); }
    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      // binary search within the outer vector; returns Scalar(0) if the entry is not stored
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
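    /** coeff() returns the stored value or Scalar(0) without modifying the matrix, whereas
      * coeffRef() first inserts a zero coefficient when the entry is not stored yet.
      * A minimal usage sketch (sizes and values are illustrative only):
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> A(1000,1000);
      * double v = A.coeff(3,7);   // 0.0, nothing is inserted
      * A.coeffRef(3,7) = 2.5;     // inserts (3,7), costly when the matrix is compressed
      * v = A.coeff(3,7);          // now 2.5
      * \endcode
      */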
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
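    /** Reserving an estimate of the total number of non-zeros before a batch of insertions
      * avoids repeated reallocations of the value/index buffers. A usage sketch
      * (the estimate of ~5 entries per column is illustrative only):
      * \code
      * #include <Eigen/SparseCore>
      * const int n = 1000;
      * Eigen::SparseMatrix<double> A(n,n);
      * A.reserve(5*n);   // A is compressed right after construction/resize, so this overload is valid
      * \endcode
      */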
    #ifdef EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);

    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500)
        SizesType::value_type())
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    #endif // EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
        Index totalReserveSize = 0;
        // compressed mode: temporarily switch to uncompressed storage;
        // m_innerNonZeros is (re)used to hold the new outer starting positions
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        m_data.reserve(totalReserveSize);
        // move existing entries to their new positions, from the last inner vector
        // to the first one to avoid overwriting
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        // move existing entries to their new positions, from the last inner vector
        // to the first one to avoid overwriting
        for(Index j=m_outerSize-1; j>=0; --j)
          Index offset = newOuterIndex[j] - m_outerIndex[j];
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
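    /** The SizesType overload lets callers reserve a different amount of room per inner vector
      * (per column in the default column-major order); any vector-like object providing operator[]
      * works. A usage sketch, the per-column estimate of 6 being illustrative only:
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> A(1000,1000);
      * A.reserve(Eigen::VectorXi::Constant(A.cols(), 6));  // ~6 non-zeros expected per column
      * A.insert(0,0)   = 1.0;                              // cheap as long as the estimate holds
      * A.insert(500,3) = 2.0;
      * A.makeCompressed();
      * \endcode
      */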
    inline Scalar& insertBack(Index row, Index col)
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);

    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    inline void startVec(Index outer)
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];

    inline void finalize()
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        while (i>=0 && m_outerIndex[i]==0)
        while (i<=m_outerSize)
          m_outerIndex[i] = size;
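    /** startVec/insertBack/finalize form the low-level sequential-fill API: inner vectors must be
      * visited in order and inner indices must be strictly increasing within each vector, then
      * finalize() closes the trailing empty vectors. A sketch of this pattern (sizes and values
      * are illustrative only):
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> A(4,4);    // column-major: outer = column, inner = row
      * A.reserve(8);
      * for (int j = 0; j < 4; ++j) {
      *   A.startVec(j);
      *   A.insertBack(j, j) = 1.0;            // row indices strictly increasing within column j
      *   if (j+1 < 4) A.insertBack(j+1, j) = -1.0;
      * }
      * A.finalize();
      * \endcode
      */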
    template<typename InputIterators>

    template<typename InputIterators, typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
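    /** setFromTriplets() builds the whole matrix from an unordered list of (row, col, value)
      * entries; duplicated coordinates are summed by default, or combined with the functor
      * overload. A usage sketch (values are illustrative only):
      * \code
      * #include <Eigen/SparseCore>
      * #include <vector>
      * std::vector<Eigen::Triplet<double>> coefficients;
      * coefficients.emplace_back(0, 0, 3.0);
      * coefficients.emplace_back(1, 2, -1.0);
      * coefficients.emplace_back(0, 0, 2.0);   // duplicate of (0,0)
      * Eigen::SparseMatrix<double> A(3,3);
      * A.setFromTriplets(coefficients.begin(), coefficients.end());  // A.coeff(0,0) == 5.0
      * \endcode
      */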
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      // pack the inner vectors contiguously, squeezing out the free space left by uncompressed mode
      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      std::free(m_innerNonZeros);

      m_data.resize(m_outerIndex[m_outerSize]);
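    /** Random insertions leave the matrix in uncompressed mode with per-vector free space;
      * makeCompressed() packs the entries and releases m_innerNonZeros, which the standard
      * CSC/CSR export and most solvers expect. A sketch (sizes/values illustrative):
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> A(100,100);
      * A.insert(5,5)  = 1.0;          // A becomes uncompressed
      * A.insert(2,10) = 2.0;
      * A.makeCompressed();            // back to plain compressed storage: A.isCompressed() == true
      * \endcode
      */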
      if(m_innerNonZeros != 0)
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      prune(default_prunning_func(reference,epsilon));
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
      // compact the kept entries in place; k is the write position
      for(Index j=0; j<m_outerSize; ++j)
        Index previousStart = m_outerIndex[j];
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
      m_outerIndex[m_outerSize] = k;
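    /** prune() keeps only the entries for which the predicate keep(row,col,value) returns true
      * (or, in the scalar overload, entries that are not much smaller than the reference value)
      * and recompresses the matrix. A sketch with an illustrative threshold:
      * \code
      * #include <Eigen/SparseCore>
      * #include <cmath>
      * Eigen::SparseMatrix<double> A(100,100);
      * A.insert(0,0) = 1.0;
      * A.insert(3,7) = 1e-15;
      * A.prune(1.0, 1e-12);   // drop entries negligible w.r.t. the reference value 1.0
      * A.prune([](Eigen::Index i, Eigen::Index j, double v) { return i==j || std::abs(v) > 1e-9; });
      * \endcode
      */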
      // conservativeResize: keep the existing entries that still fit in the new dimensions
      if (this->rows() == rows && this->cols() == cols) return;

      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      else if (innerChange < 0)
        // the inner size shrinks: switch to uncompressed mode so trailing entries can be dropped per inner vector
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];

      if (m_innerNonZeros && innerChange < 0)
        // drop the entries whose inner index no longer fits
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;

      m_innerSize = newInnerSize;
      if (outerChange == 0)

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;

      // newly appended outer vectors are empty: they all start at the current end of the data
      StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
      for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
        m_outerIndex[i] = lastIdx;

      m_outerSize += outerChange;
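    /** conservativeResize() changes the dimensions while keeping the stored entries that still
      * fit; entries whose index exceeds the new bounds are dropped. A sketch (sizes/values
      * illustrative):
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> A(10,10);
      * A.insert(9,9) = 1.0;
      * A.insert(2,3) = 2.0;
      * A.conservativeResize(5,5);   // (2,3) is kept, (9,9) is dropped; resize(5,5) would clear everything
      * \endcode
      */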
      m_innerSize = IsRowMajor ? cols : rows;
      if (m_outerSize != outerSize || m_outerSize==0)
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();
        std::free(m_innerNonZeros);
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    void resizeNonZeros(Index size)
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
      check_template_parameters();

      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
      check_template_parameters();

    template<typename OtherDerived>
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      *this = other.derived();
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      internal::call_assignment_no_alias(*this, other.derived());

    template<typename OtherDerived, unsigned int UpLo>
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
      check_template_parameters();
      Base::operator=(other);

      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
      check_template_parameters();
      *this = other.derived();

    template<typename OtherDerived>
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
      check_template_parameters();
      initAssignment(other);

    template<typename OtherDerived>
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
      check_template_parameters();
      *this = other.derived();
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      std::free(m_innerNonZeros);
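    /** setIdentity() resets a square matrix to the identity, reusing the existing buffers with
      * exactly one entry per outer vector. A sketch (size illustrative):
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> I(100,100);
      * I.setIdentity();               // I.nonZeros() == 100, I.isCompressed() == true
      * \endcode
      */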
      if (other.isRValue())
        swap(other.const_cast_derived());
      else if(this!=&other)
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        initAssignment(other);
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
          Base::operator=(other);
    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
    #endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
        s << "Nonzero entries:\n";
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
          for (Index i=0; i<m.outerSize(); ++i)
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k) {
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
    template<typename Other>
    void initAssignment(const Other& other)
      resize(other.rows(), other.cols());
        std::free(m_innerNonZeros);

    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
      StorageIndex m_index;
      StorageIndex m_value;
      typedef StorageIndex value_type;
        : m_index(convert_index(i)), m_value(convert_index(v))
      StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }

    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    template<typename DiagXpr, typename Func>
    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
      Index n = diagXpr.size();

      if((this->rows()!=n) || (this->cols()!=n))

      if(m_data.size()==0 || overwrite)
        // fast path: the diagonal becomes the only content of the matrix
        this->makeCompressed();
        this->resizeNonZeros(n);
        internal::call_assignment_no_alias(values, diagXpr, assignFunc);

        bool isComp = isCompressed();
        internal::evaluator<DiagXpr> diaEval(diagXpr);
        std::vector<IndexPosPair> newEntries;

        // 1 - try in-place updates, recording the coefficients that cannot be inserted in place
        for(Index i = 0; i<n; ++i)
          internal::LowerBoundIndex lb = this->lower_bound(i,i);
            // the diagonal coefficient is already stored: update it in place
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
            // uncompressed mode with free room in this inner vector: shift and insert
            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
            m_innerNonZeros[i]++;
            m_data.value(p) = Scalar(0);
            m_data.index(p) = StorageIndex(i);
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
            // no room left: defer the insertion
            newEntries.push_back(IndexPosPair(i,p));

        // 2 - insert the deferred entries through a single copy into a fresh storage buffer
        Index n_entries = Index(newEntries.size());
          Storage newData(m_data.size()+n_entries);
          for(Index k=0; k<n_entries;++k)
            Index i = newEntries[k].i;
            Index p = newEntries[k].p;
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
            for(Index j=prev_i;j<i;++j)
              m_outerIndex[j+1] += k;
              m_innerNonZeros[i]++;
            newData.value(p+k) = Scalar(0);
            newData.index(p+k) = StorageIndex(i);
            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
          internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
          internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
          for(Index j=prev_i+1;j<=m_outerSize;++j)
            m_outerIndex[j] += n_entries;
          m_data.swap(newData);
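    /** assignDiagonal() is what backs the writable diagonal() of a sparse matrix: existing
      * diagonal entries are updated in place and missing ones are inserted. A sketch of what this
      * enables at the user level, assuming an Eigen version where diagonal() of a SparseMatrix is
      * writable (values illustrative):
      * \code
      * #include <Eigen/SparseCore>
      * Eigen::SparseMatrix<double> A(4,4);
      * A.insert(1,1) = 5.0;
      * A.diagonal() += Eigen::VectorXd::Constant(4, 1.0);  // (1,1) updated in place, the others inserted
      * \endcode
      */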
    static void check_template_parameters()
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
        return !internal::isMuchSmallerThan(value, reference, epsilon);
namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  // build the result in a temporary with the opposite storage order, then assign (which transposes back)
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

    // pass 1: count the number of entries per inner vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    for(InputIterator it(begin); it!=end; ++it)
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;

    // pass 2: insert all the elements into trMat
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: merge duplicated entries with dup_func
    trMat.collapseDuplicates(dup_func);
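/** The DupFunctor decides how duplicated (i,j) entries are combined in pass 3; setFromTriplets()
  * forwards any binary functor. A sketch keeping the maximum instead of the default sum
  * (values illustrative):
  * \code
  * #include <Eigen/SparseCore>
  * #include <algorithm>
  * #include <vector>
  * std::vector<Eigen::Triplet<double>> ts = { {0,0,1.0}, {0,0,4.0}, {1,1,2.0} };
  * Eigen::SparseMatrix<double> A(2,2);
  * A.setFromTriplets(ts.begin(), ts.end(),
  *                   [](const double& a, const double& b) { return std::max(a,b); });  // A.coeff(0,0) == 4.0
  * \endcode
  */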
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators, typename DupFunctor>
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
  eigen_assert(!isCompressed());
  // wi(inner) caches the position of the last entry kept for that inner index within the current outer vector
  IndexVector wi(innerSize());
  StorageIndex count = 0;
  for(Index j=0; j<outerSize(); ++j)
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
      Index i = m_data.index(k);
        // duplicate entry: merge it with the previously kept one
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
        // first occurrence: keep it at the next free position
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
    m_outerIndex[j] = start;
  m_outerIndex[m_outerSize] = count;

  // the matrix is now back in compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    // two-pass algorithm: 1 - count the entries per destination inner vector, 2 - do the actual copy
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());

    // pass 1: count the entries of each inner vector of the destination
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum turning the counts into starting positions
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
    dest.m_outerIndex[dest.outerSize()] = count;

    // pass 2: copy the indices and values
    dest.m_data.resize(count);
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();

  if(other.isRValue())
    initAssignment(other.derived());
  return Base::operator=(other.derived());
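/** When the source and destination storage orders differ, this assignment performs the two-pass
  * transposed copy above instead of a plain copy. A sketch (size illustrative):
  * \code
  * #include <Eigen/SparseCore>
  * Eigen::SparseMatrix<double, Eigen::ColMajor> Ac(100,100);
  * Ac.insert(3,7) = 1.0;
  * Eigen::SparseMatrix<double, Eigen::RowMajor> Ar = Ac;  // needToTranspose: count, prefix sum, copy
  * \endcode
  */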
template<typename _Scalar, int _Options, typename _StorageIndex>
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

    // the matrix is empty: reserve a reasonable amount of room before switching to uncompressed mode
    if(m_data.allocatedSize()==0)
      m_data.reserve(2*m_innerSize);

    m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
    if(!m_innerNonZeros) internal::throw_std_bad_alloc();

    memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

    // mark all inner vectors as empty and starting at the end of the allocated buffer
    StorageIndex end = convert_index(m_data.allocatedSize());
    for(Index j=1; j<=m_outerSize; ++j)
      m_outerIndex[j] = end;

    // turn the matrix into non-compressed mode
    m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
    if(!m_innerNonZeros) internal::throw_std_bad_alloc();
    for(Index j=0; j<m_outerSize; ++j)
      m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  if(m_outerIndex[outer]==data_end)
    // this is the first element of this (and the following) inner vector(s)
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    StorageIndex p = convert_index(m_data.size());
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    if(data_end != m_data.allocatedSize())
      // m_data has been reallocated: update the outer indices that still point to the old end of the buffer
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    return m_data.value(p);

  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
    // the element is appended at the end of the last non-empty inner vector
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    if(data_end != m_data.allocatedSize())
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;

    // move the new element to its sorted position within the inner vector
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));

  if(m_data.size() != m_data.allocatedSize())
    // make the matrix compatible with random un-compressed insertion, then fall back to insertUncompressed
    m_data.resize(m_data.allocatedSize());

  return insertUncompressed(row,col);
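/** insert(row,col) must not be called for an element that is already stored (use coeffRef for
  * that). A sequential, column-by-column fill mostly stays on the cheap append branches above,
  * while scattered insertions without a prior reserve() fall back to insertUncompressed. A sketch
  * of the sequential case (sizes/values illustrative):
  * \code
  * #include <Eigen/SparseCore>
  * Eigen::SparseMatrix<double> A(1000,1000);          // column-major
  * for (int j = 0; j < 1000; ++j) {
  *   A.insert(j, j) = 2.0;                            // hits the append fast paths above
  *   if (j+1 < 1000) A.insert(j+1, j) = -1.0;
  * }
  * A.makeCompressed();
  * \endcode
  */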
template<typename _Scalar, int _Options, typename _StorageIndex>
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
    // no room left in this inner vector: reserve some extra space for it
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
      m_outerIndex[previousOuter] = convert_index(m_data.size());
    m_outerIndex[outer+1] = m_outerIndex[outer];

  // isLastVec: the insertion position is at the very end of the stored data
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
    if (m_data.size()==0)
      // estimate the final number of entries from the current filling ratio,
      // and bound the growth factor to avoid both tiny and huge reallocations
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
  m_data.resize(m_data.size()+1,reallocRatio);

  if (previousOuter==-1)
    // wrong guess: correct the outer offsets of the leading empty inner vectors
    for (Index k=0; k<=(outer+1); ++k)
      m_outerIndex[k] = 0;
    while(m_outerIndex[k]==0)
      m_outerIndex[k++] = 1;
    while (k<=m_outerSize && m_outerIndex[k]!=0)
      m_outerIndex[k++]++;
    k = m_outerIndex[k]-1;
      m_data.index(k) = m_data.index(k-1);
      m_data.value(k) = m_data.value(k-1);

    // the insertion point is not at the end of the data: shift the outer offsets and the trailing entries by one
    while (j<=m_outerSize && m_outerIndex[j]!=0)
      m_outerIndex[j++]++;
    Index k = m_outerIndex[j]-1;
      m_data.index(k) = m_data.index(k-1);
      m_data.value(k) = m_data.value(k-1);

  // finally, move the new coefficient to its sorted position within its inner vector
  while ( (p > startId) && (m_data.index(p-1) > inner) )
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >

#endif // EIGEN_SPARSEMATRIX_H