#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H
template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::StorageIndex> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;
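/** \brief Supernodal sparse LU factorization for general square sparse matrices.
  *
  * A minimal usage sketch; it is only an illustration (the matrix/vector names are made up,
  * and solve() is the entry point inherited from SparseSolverBase):
  * \code
  *   SparseMatrix<double, ColMajor> A;   // fill the (square) matrix to factorize
  *   VectorXd b, x;                      // right-hand side and solution
  *   SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
  *   solver.analyzePattern(A);           // symbolic step: fill-reducing ordering + elimination tree
  *   solver.factorize(A);                // numerical step: compute L, U and the permutations
  *   x = solver.solve(b);                // solves A x = b
  * \endcode
  */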
template <typename _MatrixType, typename _OrderingType>
class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
                 public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>
typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;
using APIBase::m_isInitialized;
using APIBase::_solve_impl;

typedef _MatrixType MatrixType;
typedef _OrderingType OrderingType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;
typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;
enum {
  ColsAtCompileTime = MatrixType::ColsAtCompileTime,
  MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
SparseLU() : m_lastError(""), m_Ustore(0,0,0,0,0,0), m_symmetricmode(false), m_diagpivotthresh(1.0), m_detPermR(1)

explicit SparseLU(const MatrixType& matrix)
  : m_lastError(""), m_Ustore(0,0,0,0,0,0), m_symmetricmode(false), m_diagpivotthresh(1.0), m_detPermR(1)
void factorize(const MatrixType& matrix);
void simplicialfactorize(const MatrixType& matrix);
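/** A hedged sketch of the two-phase analyzePattern()/factorize() workflow (the names lu, A, A2, b, x
  * are illustrative): the symbolic analysis can be reused for several matrices sharing the same
  * sparsity pattern, while factorize() redoes only the numerical part.
  * \code
  *   SparseLU<SparseMatrix<double> > lu;
  *   lu.analyzePattern(A);    // done once for the sparsity pattern
  *   lu.factorize(A);
  *   x = lu.solve(b);
  *   lu.factorize(A2);        // A2 has the same pattern as A: no new analysis needed
  *   x = lu.solve(b);
  * \endcode
  */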
inline Index rows() const { return m_mat.rows(); }
inline Index cols() const { return m_mat.cols(); }
m_symmetricmode = sym;  // remember whether the pattern of the input matrix was declared symmetric (isSymmetric())
m_diagpivotthresh = thresh;  // diagonal pivoting threshold, set via setPivotThreshold()
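// Threshold semantics (SuperLU-style threshold pivoting): during factorization the diagonal entry of a
// column is kept as the pivot whenever its magnitude is at least thresh times the largest magnitude in
// that column; thresh = 1.0 amounts to classical partial pivoting, while thresh = 0.0 always keeps the
// diagonal entry unless it is exactly zero.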
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns an expression of the solution X of A X = B, using the current decomposition of A. */
template<typename Rhs>
inline const Solve<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const;
#endif // EIGEN_PARSED_BY_DOXYGEN
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
template<typename Rhs, typename Dest>
bool _solve_impl(const MatrixBase<Rhs>& B, MatrixBase<Dest>& X_base) const
Dest& X(X_base.derived());
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
                    THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
X.resize(B.rows(),B.cols());
// Permute the right-hand side to form X = Pr * B
for(Index j = 0; j < B.cols(); ++j)
  X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);

// Forward substitution with L, then backward substitution with U
this->matrixL().solveInPlace(X);
this->matrixU().solveInPlace(X);

// Permute back the solution
for (Index j = 0; j < B.cols(); ++j)
  X.col(j) = colsPermutation().inverse() * X.col(j);
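// Putting the steps above together, _solve_impl computes X = Pc^-1 * U^-1 * L^-1 * Pr * B, i.e. it applies
// (up to the permutation convention) the factorization Pr * A * Pc^-1 = L * U produced by factorize().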
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
using std::abs;
Scalar det = Scalar(1.);
// The diagonal blocks of U are stored inside the supernodes, i.e. within m_Lstore
for (Index j = 0; j < this->cols(); ++j)
{
  for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
  {
    if(it.row() == j)  // diagonal entry of U
      det *= abs(it.value());
  }
}
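// Since L has a unit diagonal and the permutation matrices have determinant +-1, the absolute value of
// det(A) is exactly the product of the absolute values of the diagonal entries of U accumulated above.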
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
using std::log;
using std::abs;
Scalar det = Scalar(0.);
for (Index j = 0; j < this->cols(); ++j)
{
  for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
  {
    if(it.row() < j) continue;
    if(it.row() == j)  // diagonal entry of U
    {
      det += log(abs(it.value()));
      break;
    }
  }
}
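// Accumulating log|U(j,j)| instead of the raw product avoids overflow/underflow for large matrices;
// exp() of this value recovers |det(A)| whenever it is representable.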
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
Index det = 1;
for (Index j = 0; j < this->cols(); ++j)
{
  for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
  {
    if(it.row() == j)  // diagonal entry of U
    {
      if(it.value() < 0)       det = -det;
      else if(it.value() == 0) return 0;
      break;
    }
  }
}
return det * m_detPermR * m_detPermC;
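// m_detPermR and m_detPermC hold the +-1 determinants of the row and column permutations (set in
// factorize()), so the sign of det(A) is their product times the sign of the product of U's diagonal
// entries, L having a unit diagonal.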
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
Scalar det = Scalar(1.);
for (Index j = 0; j < this->cols(); ++j)
{
  for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
  {
    if(it.row() == j)  // diagonal entry of U
    {
      det *= it.value();
      break;
    }
  }
}
return (m_detPermR * m_detPermC) > 0 ? det : -det;
Index nnzL() const { return m_nnzL; }
Index nnzU() const { return m_nnzU; }
void initperfvalues()
{
  m_perfv.panel_size = 16;
  m_perfv.maxsuper = 128;
  m_perfv.fillfactor = 20;
}
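// These mirror SuperLU's performance tuning parameters: panel_size is the number of columns factorized
// together in one panel, maxsuper bounds the number of columns grouped into a single supernode, and
// fillfactor is the estimated fill ratio nnz(L+U)/nnz(A) used to size the initial working arrays.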
bool m_factorizationIsOk;
std::string m_lastError;
MappedSparseMatrix<Scalar,ColMajor,StorageIndex> m_Ustore;  // the upper-triangular factor U
PermutationType m_perm_c;                                   // column permutation
PermutationType m_perm_r;                                   // row permutation
typename Base::GlobalLU_t m_glu;                            // storage for the factors and the working arrays
bool m_symmetricmode;                                       // whether the input pattern was declared symmetric
internal::perfvalues m_perfv;                               // performance tuning parameters
RealScalar m_diagpivotthresh;                               // threshold for diagonal (partial) pivoting
Index m_nnzL, m_nnzU;                                       // number of nonzeros in L and in U
Index m_detPermR, m_detPermC;                               // determinants (+-1) of the row and column permutations
// Copy constructor intentionally left unimplemented: copying a SparseLU is disabled
SparseLU (const SparseLU& );
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
// At this point m_mat is a copy of the input and m_perm_c holds the fill-reducing column permutation
// computed by OrderingType; only the outer index (column pointer) array needs to be permuted.
ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);
// If the input is uncompressed, copy its outer indices first
if(!mat.isCompressed())
  IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1);

// Apply the column permutation m_perm_c to the column pointers and record the nonzeros per column
for (Index i = 0; i < mat.cols(); i++)
{
  m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
  m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
}
// Compute the column elimination tree of the permuted matrix
internal::coletree(m_mat, m_etree, firstRowElt);
// In symmetric mode the postordering below is skipped
if (!m_symmetricmode) {
  internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);   // postorder the etree

  // Renumber the elimination tree in postorder
  Index m = m_mat.cols();
  for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
  m_etree = iwork;

  // Build the postorder permutation and compose it with the fill-reducing column permutation
  PermutationType post_perm(m);
  for (Index i = 0; i < m; i++)
    post_perm.indices()(i) = post(i);
  if(m_perm_c.size()) {
    m_perm_c = post_perm * m_perm_c;
  }
} // end postordering

m_analysisIsOk = true;
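// At this point the symbolic analysis is complete: m_perm_c holds the fill-reducing column permutation
// (composed with the etree postorder) and m_etree holds the column elimination tree that guides the
// supernode detection performed in factorize().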
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
using internal::emptyIdxLU;
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");
m_isInitialized = true;
// Apply the column permutation computed in analyzePattern(): permute only the outer index pointers
const StorageIndex * outerIndexPtr;
if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
else
{
  // For an uncompressed input, work on a temporary copy of the outer indices
  StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1];
  for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
  outerIndexPtr = outerIndexPtr_t;
}
for (Index i = 0; i < matrix.cols(); i++)
{
  m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
  m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
}
if(!matrix.isCompressed()) delete[] outerIndexPtr;
// If no column permutation was computed, fall back to the identity
m_perm_c.resize(matrix.cols());
for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
Index m = m_mat.rows();
Index n = m_mat.cols();
Index nnz = m_mat.nonZeros();
Index maxpanel = m_perfv.panel_size * m;

// Allocate the working storage shared by the factorization routines (lwork = 0: allocate internally)
Index lwork = 0;
Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
if (info)
{
  m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n";
  m_factorizationIsOk = false;
  return;
}
tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, m));

// Identify the initial relaxed supernodes from the elimination tree
if ( m_symmetricmode == true )
  Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
else
  Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);

// No rows have been pivoted yet
m_perm_r.indices().setConstant(-1);

// Initialize the supernode bookkeeping in the global LU structure
m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
// Work on one panel at a time: either a relaxed supernode or up to panel_size contiguous columns
for (jcol = 0; jcol < n; )
{
  // Shrink the panel so that it does not overlap the next relaxed supernode
  Index panel_size = m_perfv.panel_size;
  for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
    if (relax_end(k) != emptyIdxLU) { panel_size = k - jcol; break; }
  if (k == n) panel_size = n - jcol;

  // Symbolic factorization on the panel, then numeric panel updates in topological order
  Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
  Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);

  // Sparse LU within the panel, and below the panel diagonal
  for ( jj = jcol; jj < jcol + panel_size; jj++)
// Depth-first search for the structure of the current column
info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
if (info)
{
  m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
  m_factorizationIsOk = false;
  return;
}

// Numeric updates to the current column
info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
if (info)
{
  m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
  m_factorizationIsOk = false;
  return;
}

// Copy the U-segments of this column to ucol
info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k, m_perm_r.indices(), dense_k, m_glu);
if (info)
{
  m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
  m_factorizationIsOk = false;
  return;
}
// Select the pivot for column jj using the diagonal-threshold criterion
info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
if (info)
{
  m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
  std::ostringstream returnInfo;
  returnInfo << jj;
  m_lastError += returnInfo.str();
  m_factorizationIsOk = false;
  return;
}

// Each row interchange flips the sign of the row-permutation determinant
if (pivrow != jj) m_detPermR = -m_detPermR;

// Prune the columns to the left of jj using the structure of column jj
Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
// Reset repfnz for the columns of this panel
for (i = 0; i < nseg; i++)
{
  irep = segrep(i);
  repfnz_k(irep) = emptyIdxLU;
}

// Signs of the permutation determinants, needed by determinant() and signDeterminant()
m_detPermR = m_perm_r.determinant();
m_detPermC = m_perm_c.determinant();

// Count the nonzeros in the factors, renumber the L subscripts, and expose L as a supernodal matrix
Base::countnz(n, m_nnzL, m_nnzU, m_glu);
Base::fixupL(n, m_perm_r.indices(), m_glu);
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);

m_factorizationIsOk = true;
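// Summary of factorize(): it follows the left-looking, supernode-panel scheme of SuperLU. For each panel,
// panel_dfs/column_dfs perform the symbolic step, panel_bmod/column_bmod apply the numeric updates from
// earlier supernodes, copy_to_ucol stores the U part, pivotL applies the diagonal-threshold pivoting, and
// pruneL trims the symbolic structure used by later columns.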
// Return type of SparseLU::matrixL(): a thin wrapper forwarding triangular solves to the supernodal L factor
template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType
{
  typedef typename MappedSupernodalType::Scalar Scalar;
  Index rows() const { return m_mapL.rows(); }
  Index cols() const { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace(MatrixBase<Dest>& X) const
  {
    m_mapL.solveInPlace(X);
  }
  const MappedSupernodalType& m_mapL;
};
// Return type of SparseLU::matrixU(): combines the part of U stored within the supernodes (m_mapL)
// with the remaining U columns (m_mapU) and implements the corresponding backward substitution.
template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType
{
  typedef typename MatrixLType::Scalar Scalar;
  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
    : m_mapL(mapL), m_mapU(mapU)
  {}
  Index rows() const { return m_mapL.rows(); }
  Index cols() const { return m_mapL.cols(); }
  template<typename Dest> void solveInPlace(MatrixBase<Dest>& X) const
  {
    Index nrhs = X.cols();
    // Backward substitution with U: process the supernodes from last to first
    for (Index k = m_mapL.nsuper(); k >= 0; k--)
    {
      Index fsupc = m_mapL.supToCol()[k];                                      // first column of the supernode
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;                            // number of columns in the supernode
      Index luptr = m_mapL.colIndexPtr()[fsupc];                               // start of the supernode values
      if (nsupc == 1)
      {
        // Singleton supernode: a single division by the diagonal entry of U
        for (Index j = 0; j < nrhs; j++)
          X(fsupc, j) /= m_mapL.valuePtr()[luptr];
      }
      else
      {
        // Larger supernode: dense triangular solve with its diagonal block (A and U map into the supernode values and into X)
        U = A.template triangularView<Upper>().solve(U);
      }
      // Update the remaining rows of X with the off-diagonal-block entries of U stored in m_mapU
      for (Index j = 0; j < nrhs; ++j)
      {
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
        {
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          for (; it; ++it)
          {
            Index irow = it.index();
            X(irow, j) -= X(jcol, j) * it.value();
          }
        }
      }
    } // end for each supernode
  }
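// Design note: the diagonal blocks of U live inside the supernodal storage of L (m_mapL), so the backward
// substitution alternates a dense triangular solve per supernode with sparse updates from the remaining
// U entries kept in m_mapU.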
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
};