10 #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
11 #define EIGEN_SPARSE_SELFADJOINTVIEW_H
// NOTE(review): this chunk is a partial extraction of Eigen's
// SparseSelfAdjointView.h — the leading integers on many lines are original
// source line numbers left over from extraction, and numerous intermediate
// lines (including the declared names these template headers belong to) are
// missing from this view.
//
// Fragment 1: template header — presumably the forward declaration of
// SparseSelfAdjointView<MatrixType, Mode> (TODO confirm against full file).
31 template<
typename MatrixType,
unsigned int Mode>
// Fragment 2: template header — presumably the forward declaration of
// internal::permute_symm_to_symm (SrcMode/DstMode pair matches the
// definition visible near the end of this chunk).
35 template<
int SrcMode,
int DstMode,
typename MatrixType,
int DestOrder>
// Fragment 3: template header — presumably the forward declaration of
// internal::permute_symm_to_fullsymm (single Mode + DestOrder matches the
// definition visible later in this chunk).
38 template<
int Mode,
typename MatrixType,
int DestOrder>
// class SparseSelfAdjointView (fragmentary — class head, access specifiers,
// and several member declarations are missing from this extraction).
// Wraps a sparse matrix expression and interprets only the triangular half
// selected by _Mode (Upper or Lower) as a self-adjoint (symmetric/Hermitian)
// matrix. It is an EigenBase expression, not a storage class.
44 :
public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
56 typedef typename MatrixType::Scalar Scalar;
57 typedef typename MatrixType::StorageIndex StorageIndex;
// _MatrixTypeNested: the nested matrix type with references/const stripped.
60 typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
// Constructor-time invariant: a self-adjoint view only makes sense on a
// square matrix.
64 eigen_assert(rows()==cols() &&
"SelfAdjointView is only for squared matrices");
// Dimensions are forwarded verbatim to the wrapped expression.
67 inline Index rows()
const {
return m_matrix.rows(); }
68 inline Index cols()
const {
return m_matrix.cols(); }
// Access to the wrapped (nested) expression, const and non-const.
71 const _MatrixTypeNested& matrix()
const {
return m_matrix; }
72 typename internal::remove_reference<MatrixTypeNested>::type& matrix() {
return m_matrix; }
// Template headers for the product operators (bodies not visible in this
// extraction). The `friend` forms are presumably the dense*view /
// other*view left-multiplication overloads — TODO confirm.
79 template<
typename OtherDerived>
91 template<
typename OtherDerived>
friend
99 template<
typename OtherDerived>
107 template<
typename OtherDerived>
friend
// Template header for rankUpdate(u, alpha) — definition appears later in
// this file.
122 template<
typename DerivedU>
// twistedBy(...) assignment path: applies a symmetric permutation and
// assigns the result to *this without aliasing or transposition.
132 template<
typename SrcMatrixType,
int SrcMode>
135 internal::call_assignment_no_alias_no_transpose(*
this, permutedMatrix);
// Copy assignment: implemented by routing through twistedBy() with an
// empty (null) permutation, so the same permute-based kernel is reused.
139 SparseSelfAdjointView& operator=(
const SparseSelfAdjointView& src)
141 PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
142 return *
this = src.twistedBy(pnull);
// Cross-mode assignment (e.g. Upper view from a Lower view) — same
// null-permutation trick as above.
145 template<
typename SrcMatrixType,
unsigned int SrcMode>
146 SparseSelfAdjointView& operator=(
const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
// resize() exists only to satisfy generic expression interfaces; it
// asserts that the requested size already matches and never resizes.
148 PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
149 return *
this = src.twistedBy(pnull);
154 EIGEN_ONLY_USED_FOR_DEBUG(rows);
155 EIGEN_ONLY_USED_FOR_DEBUG(cols);
156 eigen_assert(rows == this->rows() && cols == this->cols()
157 &&
"SparseSelfadjointView::resize() does not actually allow to resize.");
// The single data member: the nested matrix expression being viewed.
162 MatrixTypeNested m_matrix;
// evalTo is declared but intentionally not defined (comment in the full
// file presumably explains why — TODO confirm); evaluation goes through
// the Assignment specializations below instead.
166 template<
typename Dest>
void evalTo(Dest &)
const;
// SparseMatrixBase<Derived>::selfadjointView<UpLo>() const
// Factory: returns a read-only self-adjoint view of this sparse expression,
// using only the UpLo (Upper or Lower) triangular half as the source of
// truth. (Fragment: braces/intervening lines missing from this extraction.)
173 template<
typename Derived>
174 template<
unsigned int UpLo>
175 typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
const
177 return SparseSelfAdjointView<const Derived, UpLo>(derived());
// SparseMatrixBase<Derived>::selfadjointView<UpLo>()
// Non-const overload of the factory above: the returned view wraps a
// mutable Derived, enabling in-place operations such as rankUpdate.
180 template<
typename Derived>
181 template<
unsigned int UpLo>
182 typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
184 return SparseSelfAdjointView<Derived, UpLo>(derived());
// SparseSelfAdjointView::rankUpdate(u, alpha)
// Performs a sparse rank-K self-adjoint update. Only two statements of the
// body are visible here; `tmp` is presumably u*u.adjoint() (or similar)
// computed beforehand — TODO confirm against the full file. The two lines
// below look like the alpha==0 branch (plain assignment of the Mode
// triangle) vs. the accumulate branch (+= alpha * triangle) — confirm.
191 template<
typename MatrixType,
unsigned int Mode>
192 template<
typename DerivedU>
193 SparseSelfAdjointView<MatrixType,Mode>&
// Keep only the Mode half: the view's invariant is that the other triangle
// is implied by symmetry, so only one triangle is ever stored.
198 m_matrix = tmp.template triangularView<Mode>();
200 m_matrix += alpha * tmp.template triangularView<Mode>();
// internal::Assignment machinery mapping "dest = selfAdjointView" onto the
// permute_symm_to_fullsymm kernel. (Heavily fragmented: struct heads,
// specialization argument lists and most bodies are missing from this
// extraction — the comments below describe only what the visible lines show.)
210 template<
typename MatrixType,
unsigned int Mode>
// Generic Assignment functor entry point for SparseSelfAdjointView sources.
222 template<
typename DstXprType,
typename SrcXprType,
typename Functor>
225 typedef typename DstXprType::StorageIndex StorageIndex;
// Case: plain assignment into a sparse destination — expand the stored
// triangle into a full symmetric sparse matrix, no permutation (null perm).
228 template<
typename DestScalar,
int StorageOrder>
231 internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
// Case: compound assignment (e.g. +=/-=) — materialize the full symmetric
// matrix into `tmp` first, then apply the functor via the no-alias path.
235 template<
typename DestScalar,
int StorageOrder,
typename AssignFunc>
240 call_assignment_no_alias_no_transpose(dst, tmp, func);
// Further StorageOrder specializations (bodies not visible here).
243 template<
typename DestScalar,
int StorageOrder>
252 template<
typename DestScalar,
int StorageOrder>
// Case: dense destination — expand into a temporary sparse `tmp`, then
// presumably copy/add into the dense dest (TODO confirm: the follow-up
// statement is missing from this extraction).
261 template<
typename DestScalar>
266 internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
// res += alpha * (selfadjoint lhs) * rhs, where only one triangle of `lhs`
// is stored (selected by Mode) and the other half is reconstructed on the
// fly via conjugation. (Fragment: braces and a few enum/typedef lines are
// missing from this extraction.)
279 template<
int Mode,
typename SparseLhsType,
typename DenseRhsType,
typename DenseResType,
typename AlphaType>
280 inline void sparse_selfadjoint_time_dense_product(
const SparseLhsType& lhs,
const DenseRhsType& rhs, DenseResType& res,
const AlphaType& alpha)
282 EIGEN_ONLY_USED_FOR_DEBUG(alpha);
285 typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
286 typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
287 typedef typename LhsEval::InnerIterator LhsIterator;
288 typedef typename SparseLhsType::Scalar LhsScalar;
// ProcessFirstHalf: true when iterating an inner vector yields the stored
// triangle *before* the diagonal (Upper+col-major or Lower+row-major), so
// the main loop runs over i.index()<j; otherwise the diagonal entry is
// skipped past first (ProcessSecondHalf).
294 || ( (Mode&
Upper) && !LhsIsRowMajor)
295 || ( (Mode&
Lower) && LhsIsRowMajor),
296 ProcessSecondHalf = !ProcessFirstHalf
299 SparseLhsTypeNested lhs_nested(lhs);
300 LhsEval lhsEval(lhs_nested);
// One pass per rhs column k; within it, one pass per lhs inner vector j.
303 for (
Index k=0; k<rhs.cols(); ++k)
305 for (
Index j=0; j<lhs.outerSize(); ++j)
307 LhsIterator i(lhsEval,j);
// Second-half layout: skip entries strictly before the diagonal, then
// handle the diagonal entry (if present) immediately.
309 if (ProcessSecondHalf)
311 while (i && i.index()<j) ++i;
312 if(i && i.index()==j)
314 res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
// rhs_j caches alpha*rhs(j,k) so the mirrored contribution below is a
// single multiply per stored entry.
320 typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
322 typename DenseResType::Scalar res_j(0);
// Each stored off-diagonal entry contributes twice: (j,k) gets
// lhs_ij*rhs(i,k), and the mirrored (i,k) gets conj(lhs_ij)*rhs_j.
// The conj on column-major input accounts for the implicit adjoint of
// the unstored triangle.
323 for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
325 LhsScalar lhs_ij = i.value();
326 if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
327 res_j += lhs_ij * rhs.coeff(i.index(),k);
328 res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
330 res.coeffRef(j,k) += alpha * res_j;
// First-half layout: the diagonal entry (if any) is the first entry at or
// after position j, handled here after the off-diagonal loop.
333 if (ProcessFirstHalf && i && (i.index()==j))
334 res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
// Product dispatch for (sparse self-adjoint view) * (dense): delegates to
// the scalar kernel sparse_selfadjoint_time_dense_product above, nesting
// both operands first. (Fragment: some typedef lines missing.)
340 template<
typename LhsView,
typename Rhs,
int ProductType>
341 struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
342 : generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
344 template<
typename Dest>
345 static void scaleAndAddTo(Dest& dst,
const LhsView& lhsView,
const Rhs& rhs,
const typename Dest::Scalar& alpha)
347 typedef typename LhsView::_MatrixTypeNested Lhs;
// Nest operands so cheap expressions are kept by value / heavy ones by ref.
350 LhsNested lhsNested(lhsView.matrix());
351 RhsNested rhsNested(rhs);
// Mode of the view selects which stored triangle the kernel reads.
353 internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
// Product dispatch for (dense) * (sparse self-adjoint view): computed as the
// transpose of the mirrored product — the kernel is called on transposed
// operands writing into dstT (presumably dst.transpose(); that line is
// missing from this extraction — TODO confirm). TransposeMode swaps
// Upper/Lower accordingly.
357 template<
typename Lhs,
typename RhsView,
int ProductType>
359 :
generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
361 template<
typename Dest>
362 static void scaleAndAddTo(Dest& dst,
const Lhs& lhs,
const RhsView& rhsView,
const typename Dest::Scalar& alpha)
364 typedef typename RhsView::_MatrixTypeNested Rhs;
367 LhsNested lhsNested(lhs);
368 RhsNested rhsNested(rhsView.matrix());
// (dense*selfadj) == ((selfadj^T)*(dense^T))^T — reuse the same kernel.
372 internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
// product_evaluator specialization (fragment): eagerly evaluates a product
// whose lhs is a sparse self-adjoint view by materializing m_lhs (a plain
// object copy of the view's expansion — TODO confirm) and the full product
// into m_result, then rebinding this evaluator onto m_result via placement
// new into the Base subobject.
379 template<
typename LhsView,
typename Rhs,
int ProductTag>
381 :
public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
384 typedef typename XprType::PlainObject PlainObject;
388 : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
// Re-seat the base evaluator on the freshly computed m_result; placement
// new is used because Base has already been constructed.
390 ::new (
static_cast<Base*
>(
this))
Base(m_result);
395 typename Rhs::PlainObject m_lhs;
396 PlainObject m_result;
// Mirror of the specialization above for products whose *rhs* is the sparse
// self-adjoint view: materializes m_rhs and the product result, then
// re-seats the base evaluator on m_result via placement new.
399 template<
typename Lhs,
typename RhsView,
int ProductTag>
401 :
public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
404 typedef typename XprType::PlainObject PlainObject;
408 : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
410 ::new (
static_cast<Base*
>(
this))
Base(m_result);
415 typename Lhs::PlainObject m_rhs;
416 PlainObject m_result;
// permute_symm_to_fullsymm: expand a matrix of which only the Mode triangle
// is meaningful into a *full* symmetric sparse matrix in `dest`, optionally
// applying the symmetric permutation `perm` (null perm pointer == identity).
// Classic two-pass CRS/CCS construction: pass 1 counts entries per outer
// vector, pass 2 fills innerIndex/value arrays using `count` as a running
// write cursor. (Fragment: function signature line, enum lines, and several
// statements — including the branch heads around lines 457/460 — are
// missing from this extraction.)
426 template<
int Mode,
typename MatrixType,
int DestOrder>
429 typedef typename MatrixType::StorageIndex StorageIndex;
430 typedef typename MatrixType::Scalar Scalar;
433 typedef evaluator<MatrixType> MatEval;
434 typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
436 MatEval matEval(mat);
437 Dest& dest(_dest.derived());
// When src and dest storage orders match, a stored (i,j) lands in outer jp;
// otherwise the roles of ip/jp swap.
439 StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
442 Index size = mat.rows();
// --- Pass 1: count nonzeros contributed to each destination outer vector.
446 dest.resize(size,size);
447 for(
Index j = 0; j<size; ++j)
449 Index jp = perm ? perm[j] : j;
450 for(MatIterator it(matEval,j); it; ++it)
452 Index i = it.index();
455 Index ip = perm ? perm[i] : i;
457 count[StorageOrderMatch ? jp : ip]++;
// Off-diagonal entries of the stored triangle contribute to *two* outer
// vectors (the entry and its mirrored conjugate); the visible condition
// below identifies entries lying in the Mode triangle.
460 else if(( Mode==
Lower && r>c) || ( Mode==
Upper && r<c))
// --- Allocate and build the outer index array (prefix sum of counts),
// then reset `count` to per-outer write cursors.
467 Index nnz = count.sum();
470 dest.resizeNonZeros(nnz);
471 dest.outerIndexPtr()[0] = 0;
472 for(
Index j=0; j<size; ++j)
473 dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
474 for(
Index j=0; j<size; ++j)
475 count[j] = dest.outerIndexPtr()[j];
// --- Pass 2: scatter values. Diagonal entries are written once; each
// off-diagonal entry is written at (jp,ip) and its conjugate at (ip,jp).
478 for(StorageIndex j = 0; j<size; ++j)
480 for(MatIterator it(matEval,j); it; ++it)
482 StorageIndex i = internal::convert_index<StorageIndex>(it.index());
486 StorageIndex jp = perm ? perm[j] : j;
487 StorageIndex ip = perm ? perm[i] : i;
// Diagonal (presumably — the guarding condition is missing here): single
// write, no conjugate mirror needed.
491 Index k = count[StorageOrderMatch ? jp : ip]++;
492 dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
493 dest.valuePtr()[k] = it.value();
497 Index k = count[ip]++;
498 dest.innerIndexPtr()[k] = ip;
499 dest.valuePtr()[k] = it.value();
// Mirrored write of the same entry into the other outer vector; the
// value is conjugated so the result is Hermitian for complex scalars.
503 if(!StorageOrderMatch)
505 Index k = count[jp]++;
506 dest.innerIndexPtr()[k] = ip;
507 dest.valuePtr()[k] = it.value();
509 dest.innerIndexPtr()[k] = jp;
510 dest.valuePtr()[k] = numext::conj(it.value());
// permute_symm_to_symm: apply a symmetric permutation to a matrix whose
// SrcMode triangle is stored, producing a matrix storing only the DstMode
// triangle (no full expansion). Same two-pass count-then-fill scheme as
// permute_symm_to_fullsymm above. Entries of the source lying outside the
// SrcMode triangle are skipped; each kept entry is routed to
// min(ip,jp)/max(ip,jp) depending on DstMode so exactly one triangle is
// produced. (Fragment: several lines, including the SrcMode/DstMode enum
// aliases and some braces, are missing from this extraction.)
516 template<
int _SrcMode,
int _DstMode,
typename MatrixType,
int DstOrder>
517 void permute_symm_to_symm(
const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest,
const typename MatrixType::StorageIndex* perm)
519 typedef typename MatrixType::StorageIndex StorageIndex;
520 typedef typename MatrixType::Scalar Scalar;
521 SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
522 typedef Matrix<StorageIndex,Dynamic,1> VectorI;
523 typedef evaluator<MatrixType> MatEval;
524 typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
528 StorageOrderMatch = int(SrcOrder) == int(DstOrder),
533 MatEval matEval(mat);
535 Index size = mat.rows();
// --- Pass 1: count entries per destination outer vector.
538 dest.resize(size,size);
539 for(StorageIndex j = 0; j<size; ++j)
541 StorageIndex jp = perm ? perm[j] : j;
542 for(MatIterator it(matEval,j); it; ++it)
544 StorageIndex i = it.index();
// Skip entries outside the stored SrcMode triangle.
545 if((
int(SrcMode)==
int(
Lower) && i<j) || (
int(SrcMode)==
int(
Upper) && i>j))
548 StorageIndex ip = perm ? perm[i] : i;
// Destination outer index: DstMode==Lower keeps the smaller permuted
// index as the outer coordinate, Upper keeps the larger.
549 count[int(DstMode)==int(
Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
// --- Prefix-sum into outerIndexPtr, then reset count to write cursors.
552 dest.outerIndexPtr()[0] = 0;
553 for(
Index j=0; j<size; ++j)
554 dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
555 dest.resizeNonZeros(dest.outerIndexPtr()[size]);
556 for(
Index j=0; j<size; ++j)
557 count[j] = dest.outerIndexPtr()[j];
// --- Pass 2: scatter the kept entries.
559 for(StorageIndex j = 0; j<size; ++j)
562 for(MatIterator it(matEval,j); it; ++it)
564 StorageIndex i = it.index();
565 if((
int(SrcMode)==int(
Lower) && i<j) || (
int(SrcMode)==int(
Upper) && i>j))
568 StorageIndex jp = perm ? perm[j] : j;
569 StorageIndex ip = perm? perm[i] : i;
571 Index k = count[int(DstMode)==int(
Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
572 dest.innerIndexPtr()[k] = int(DstMode)==int(
Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
// When storage orders differ the (row,col) roles flip; after the swap,
// an entry that ended up in the "wrong" triangle relative to DstMode
// must be conjugated to preserve Hermitian-ness.
574 if(!StorageOrderMatch) std::swap(ip,jp);
575 if( ((
int(DstMode)==
int(
Lower) && ip<jp) || (
int(DstMode)==
int(
Upper) && ip>jp)))
576 dest.valuePtr()[k] = numext::conj(it.value());
578 dest.valuePtr()[k] = it.value();
// SparseSymmetricPermutationProduct: lightweight expression representing
// P * S * P^T for a self-adjoint sparse matrix S and permutation P. It only
// stores references to the operands; evaluation happens in the Assignment
// specializations below. (Fragment: traits specialization body, the class
// head, and the Perm typedef are missing from this extraction.)
589 template<
typename MatrixType,
int Mode>
595 template<
typename MatrixType,
int Mode>
597 :
public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
600 typedef typename MatrixType::Scalar Scalar;
601 typedef typename MatrixType::StorageIndex StorageIndex;
610 typedef typename MatrixType::Nested MatrixTypeNested;
611 typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;
// Constructor: captures the matrix and the permutation (no computation).
614 : m_matrix(mat), m_perm(perm)
// Dimensions forwarded from the wrapped matrix.
617 inline Index rows()
const {
return m_matrix.rows(); }
618 inline Index cols()
const {
return m_matrix.cols(); }
// Operand accessors used by the Assignment kernels below.
620 const NestedExpression& matrix()
const {
return m_matrix; }
621 const Perm& perm()
const {
return m_perm; }
624 MatrixTypeNested m_matrix;
// internal::Assignment specializations evaluating a
// SparseSymmetricPermutationProduct expression:
//  - into a plain SparseMatrix: expand to a full symmetric matrix via
//    permute_symm_to_fullsymm (result presumably copied from `tmp` to the
//    destination afterwards — that line is missing from this extraction).
//  - into another SparseSelfAdjointView: keep triangular storage and route
//    through permute_symm_to_symm with the destination's mode.
631 template<
typename DstXprType,
typename MatrixType,
int Mode,
typename Scalar>
635 typedef typename DstXprType::StorageIndex DstIndex;
636 template<
int Options>
641 internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().
indices().data());
645 template<
typename DestType,
unsigned int DestMode>
648 internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().
indices().data());
656 #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H