mirror of https://github.com/inolen/redream.git
updated eigen to 3.2.8
This commit is contained in:
parent 88639101dd
commit 7c2cdfbb56
@@ -81,7 +81,7 @@ list(APPEND REDREAM_INCLUDE_DIRS deps/dirent-1.21)
# eigen
list(APPEND REDREAM_DEFS EIGEN_DEFAULT_TO_ROW_MAJOR)
list(APPEND REDREAM_INCLUDE_DIRS deps/eigen-3.2.1)
list(APPEND REDREAM_INCLUDE_DIRS deps/eigen-3.2.8)

# gdb_server
list(APPEND REDREAM_INCLUDE_DIRS deps/gdb_server)
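Aside: the only change on the redream side is the bumped vendored include path; EIGEN_DEFAULT_TO_ROW_MAJOR is kept, so matrices still default to row-major storage. A minimal sketch (not part of the commit, assumes only a C++ compiler with Eigen on the include path) of what that define implies for client code:

// Sketch only: EIGEN_DEFAULT_TO_ROW_MAJOR must be defined before any Eigen
// header; plain Matrix/Array types then default to row-major storage instead
// of Eigen's usual column-major layout.
#define EIGEN_DEFAULT_TO_ROW_MAJOR
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix<float, 2, 3> m;
  m << 1, 2, 3,
       4, 5, 6;
  // With the define, coefficients sit row by row in memory: 1 2 3 4 5 6
  // (column-major Eigen would store 1 4 2 5 3 6). Indexing is unaffected.
  std::cout << "row major? " << Eigen::Matrix<float, 2, 3>::IsRowMajor << "\n";
  std::cout << m(1, 2) << "\n";  // prints 6 either way
  return 0;
}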
@@ -1,25 +0,0 @@
# Pastix requires METIS or METIS (partitioning and reordering tools)

if (METIS_INCLUDES AND METIS_LIBRARIES)
set(METIS_FIND_QUIETLY TRUE)
endif (METIS_INCLUDES AND METIS_LIBRARIES)

find_path(METIS_INCLUDES
NAMES
metis.h
PATHS
$ENV{METISDIR}
${INCLUDE_INSTALL_DIR}
PATH_SUFFIXES
metis
include
)


find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR} PATH_SUFFIXES lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(METIS DEFAULT_MSG
METIS_INCLUDES METIS_LIBRARIES)

mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES)
@@ -1,40 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "sparse_solver.h"

template<typename T> void test_simplicial_cholesky_T()
{
  SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower;
  SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper;
  SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower;
  SimplicialLDLT<SparseMatrix<T>, Upper> llt_colmajor_upper;
  SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower;
  SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper;

  check_sparse_spd_solving(chol_colmajor_lower);
  check_sparse_spd_solving(chol_colmajor_upper);
  check_sparse_spd_solving(llt_colmajor_lower);
  check_sparse_spd_solving(llt_colmajor_upper);
  check_sparse_spd_solving(ldlt_colmajor_lower);
  check_sparse_spd_solving(ldlt_colmajor_upper);

  check_sparse_spd_determinant(chol_colmajor_lower);
  check_sparse_spd_determinant(chol_colmajor_upper);
  check_sparse_spd_determinant(llt_colmajor_lower);
  check_sparse_spd_determinant(llt_colmajor_upper);
  check_sparse_spd_determinant(ldlt_colmajor_lower);
  check_sparse_spd_determinant(ldlt_colmajor_upper);
}

void test_simplicial_cholesky()
{
  CALL_SUBTEST_1(test_simplicial_cholesky_T<double>());
  CALL_SUBTEST_2(test_simplicial_cholesky_T<std::complex<double> >());
}
@@ -1,11 +0,0 @@
set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3 Polynomials
FFT NonLinearOptimization SparseExtra IterativeSolvers
NumericalDiff Skyline MPRealSupport OpenGLSupport KroneckerProduct Splines LevenbergMarquardt
)

install(FILES
${Eigen_HEADERS}
DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel
)

add_subdirectory(src)
@@ -1,4 +1,4 @@
repo: 8a21fd850624c931e448cbcfb38168cb2717c790
node: 6b38706d90a9fe182e66ab88477b3dbde34b9f66
node: 07105f7124f9aef00a68c85e0fc606e65d3d6c15
branch: 3.2
tag: 3.2.1
tag: 3.2.8
@@ -24,3 +24,10 @@ da195914abcc1d739027cbee7c52077aab30b336 3.2-beta1
4b687cad1d23066f66863f4f87298447298443df 3.2-rc1
1eeda7b1258bcd306018c0738e2b6a8543661141 3.2-rc2
ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1
1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2
36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3
10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4
bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6
b30b87236a1b1552af32ac34075ee5696a9b5a33 3.2.7
@@ -1,6 +1,5 @@
project(Eigen)

cmake_minimum_required(VERSION 2.8.2)
cmake_minimum_required(VERSION 2.8.5)

# guard against in-source builds

@@ -55,6 +54,7 @@ endif(EIGEN_HG_CHANGESET)

include(CheckCXXCompilerFlag)
include(GNUInstallDirs)

set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)

@@ -288,25 +288,26 @@ option(EIGEN_TEST_C++0x "Enables all C++0x features." OFF)

include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

# the user modifiable install path for header files
set(EIGEN_INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR} CACHE PATH "The directory where we install the header files (optional)")

# set the internal install path for header files which depends on wether the user modifiable
# EIGEN_INCLUDE_INSTALL_DIR has been set by the user or not.
if(EIGEN_INCLUDE_INSTALL_DIR)
set(INCLUDE_INSTALL_DIR
${EIGEN_INCLUDE_INSTALL_DIR}
CACHE INTERNAL
"The directory where we install the header files (internal)"
)
# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR
if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR)
set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR}
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed")
else()
set(INCLUDE_INSTALL_DIR
"${CMAKE_INSTALL_PREFIX}/include/eigen3"
CACHE INTERNAL
"The directory where we install the header files (internal)"
)
"${CMAKE_INSTALL_INCLUDEDIR}/eigen3"
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed"
)
endif()

set(CMAKEPACKAGE_INSTALL_DIR
"${CMAKE_INSTALL_LIBDIR}/cmake/eigen3"
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen3Config.cmake is installed"
)
set(PKGCONFIG_INSTALL_DIR
"${CMAKE_INSTALL_DATADIR}/pkgconfig"
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where eigen3.pc is installed"
)

# similar to set_target_properties but append the property instead of overwriting it
macro(ei_add_target_property target prop value)

@@ -324,21 +325,9 @@ install(FILES
)

if(EIGEN_BUILD_PKGCONFIG)
SET(path_separator ":")
STRING(REPLACE ${path_separator} ";" pkg_config_libdir_search "$ENV{PKG_CONFIG_LIBDIR}")
message(STATUS "searching for 'pkgconfig' directory in PKG_CONFIG_LIBDIR ( $ENV{PKG_CONFIG_LIBDIR} ), ${CMAKE_INSTALL_PREFIX}/share, and ${CMAKE_INSTALL_PREFIX}/lib")
FIND_PATH(pkg_config_libdir pkgconfig ${pkg_config_libdir_search} ${CMAKE_INSTALL_PREFIX}/share ${CMAKE_INSTALL_PREFIX}/lib ${pkg_config_libdir_search})
if(pkg_config_libdir)
SET(pkg_config_install_dir ${pkg_config_libdir})
message(STATUS "found ${pkg_config_libdir}/pkgconfig" )
else(pkg_config_libdir)
SET(pkg_config_install_dir ${CMAKE_INSTALL_PREFIX}/share)
message(STATUS "pkgconfig not found; installing in ${pkg_config_install_dir}" )
endif(pkg_config_libdir)

configure_file(eigen3.pc.in eigen3.pc)
configure_file(eigen3.pc.in eigen3.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eigen3.pc
DESTINATION ${pkg_config_install_dir}/pkgconfig
DESTINATION ${PKGCONFIG_INSTALL_DIR}
)
endif(EIGEN_BUILD_PKGCONFIG)

@@ -401,12 +390,15 @@ if(cmake_generator_tolower MATCHES "makefile")
message(STATUS "--------------+--------------------------------------------------------------")
message(STATUS "Command | Description")
message(STATUS "--------------+--------------------------------------------------------------")
message(STATUS "make install | Install to ${CMAKE_INSTALL_PREFIX}. To change that:")
message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourpath")
message(STATUS " | Eigen headers will then be installed to:")
message(STATUS " | ${INCLUDE_INSTALL_DIR}")
message(STATUS " | To install Eigen headers to a separate location, do:")
message(STATUS " | cmake . -DEIGEN_INCLUDE_INSTALL_DIR=yourpath")
message(STATUS "make install | Install Eigen. Headers will be installed to:")
message(STATUS " | <CMAKE_INSTALL_PREFIX>/<INCLUDE_INSTALL_DIR>")
message(STATUS " | Using the following values:")
message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}")
message(STATUS " | Change the install location of Eigen headers using:")
message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix")
message(STATUS " | Or:")
message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir")
message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")
message(STATUS "make check | Build and run the unit-tests. Read this page:")
message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
@@ -4,14 +4,10 @@
## # The following are required to uses Dart and the Cdash dashboard
## ENABLE_TESTING()
## INCLUDE(CTest)
set(CTEST_PROJECT_NAME "Eigen")
set(CTEST_PROJECT_NAME "Eigen3.2")
set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")

set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "manao.inria.fr")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2")
set(CTEST_DROP_SITE_CDASH TRUE)
set(CTEST_PROJECT_SUBPROJECTS
Official
Unsupported
)
@@ -12,7 +12,7 @@ extern "C" {
/** \ingroup Support_modules
* \defgroup CholmodSupport_Module CholmodSupport module
*
* This module provides an interface to the Cholmod library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
* This module provides an interface to the Cholmod library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
* It provides the two following main factorization classes:
* - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
* - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
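Only the documentation link changes here; the module itself is untouched. For context, a hedged usage sketch of the classes this comment names (assumes Eigen is used together with CHOLMOD from SuiteSparse; the matrix contents are made up):

// Sketch only: solving a sparse symmetric positive definite system via the
// CholmodSupport module. Requires compiling and linking against CHOLMOD.
#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>

Eigen::VectorXd solve_spd(const Eigen::SparseMatrix<double>& A,
                          const Eigen::VectorXd& b) {
  // CholmodSupernodalLLT is the supernodal LLT factorization mentioned above;
  // CholmodDecomposition would pick supernodal vs. simplicial at runtime.
  Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                 // analyze pattern + factorize
  if (solver.info() != Eigen::Success) {
    return Eigen::VectorXd();        // factorization failed
  }
  return solver.solve(b);            // forward/back substitution
}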
@@ -95,7 +95,7 @@
extern "C" {
// In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
// Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
#ifdef __INTEL_COMPILER
#if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110
#include <immintrin.h>
#else
#include <emmintrin.h>

@@ -123,7 +123,7 @@
#undef bool
#undef vector
#undef pixel
#elif defined __ARM_NEON__
#elif defined __ARM_NEON
#define EIGEN_VECTORIZE
#define EIGEN_VECTORIZE_NEON
#include <arm_neon.h>

@@ -165,7 +165,7 @@
#endif

// required for __cpuid, needs to be included after cmath
#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE))
#include <intrin.h>
#endif
@@ -10,7 +10,7 @@
/** \ingroup Support_modules
* \defgroup SPQRSupport_Module SuiteSparseQR module
*
* This module provides an interface to the SPQR library, which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
* This module provides an interface to the SPQR library, which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
*
* \code
* #include <Eigen/SPQRSupport>
@@ -14,7 +14,7 @@
/**
* \defgroup SparseCore_Module SparseCore module
*
* This module provides a sparse matrix representation, and basic associatd matrix manipulations
* This module provides a sparse matrix representation, and basic associated matrix manipulations
* and operations.
*
* See the \ref TutorialSparse "Sparse tutorial"
@@ -12,7 +12,7 @@ extern "C" {
/** \ingroup Support_modules
* \defgroup UmfPackSupport_Module UmfPackSupport module
*
* This module provides an interface to the UmfPack library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
* This module provides an interface to the UmfPack library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
* It provides the following factorization class:
* - class UmfPackLU: a multifrontal sequential LU factorization.
*
@@ -235,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
}

protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}

/** \internal
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.

@@ -274,30 +279,13 @@ template<> struct ldlt_inplace<Lower>
return true;
}

RealScalar cutoff(0), biggest_in_corner;

for (Index k = 0; k < size; ++k)
{
// Find largest diagonal element
Index index_of_biggest_in_corner;
biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
index_of_biggest_in_corner += k;

if(k == 0)
{
// The biggest overall is the point of reference to which further diagonals
// are compared; if any diagonal is negligible compared
// to the largest overall, the algorithm bails.
cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
}

// Finish early if the matrix is not full rank.
if(biggest_in_corner < cutoff)
{
for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
break;
}

transpositions.coeffRef(k) = index_of_biggest_in_corner;
if(k != index_of_biggest_in_corner)
{
@@ -328,15 +316,20 @@ template<> struct ldlt_inplace<Lower>

if(k>0)
{
temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();
mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
if(rs>0)
A21.noalias() -= A20 * temp.head(k);
}
if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
A21 /= mat.coeffRef(k,k);

// In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
// was smaller than the cutoff value. However, since LDLT is not rank-revealing
// we should only make sure we do not introduce INF or NaN values.
// LAPACK also uses 0 as the cutoff value.
RealScalar realAkk = numext::real(mat.coeffRef(k,k));
if((rs>0) && (abs(realAkk) > RealScalar(0)))
A21 /= realAkk;

if (sign == PositiveSemiDef) {
if (realAkk < 0) sign = Indefinite;
} else if (sign == NegativeSemiDef) {
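In short, the pivot is no longer compared against a relative cutoff: since the LDL^T factorization is not rank-revealing, the 3.2.8 code only avoids dividing by an exactly zero pivot, matching LAPACK's behaviour. A hedged sketch (not part of the diff, values made up) of the user-visible side of this:

// Sketch only: LDLT of a positive semi-definite matrix. A rank-deficient input
// simply produces a (near) zero entry in D; the solve still works for right-hand
// sides in the range of A.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d A;
  A << 4, 2,
       2, 1;                                   // rank 1, positive semi-definite
  Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
  std::cout << "D = " << ldlt.vectorD().transpose() << "\n";  // roughly [4, 0]
  Eigen::Vector2d b(2, 1);                     // b lies in the range of A
  std::cout << "x = " << ldlt.solve(b).transpose() << "\n";
  return 0;
}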
@@ -446,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
check_template_parameters();

eigen_assert(a.rows()==a.cols());
const Index size = a.rows();

@@ -454,6 +449,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_transpositions.resize(size);
m_isInitialized = false;
m_temporary.resize(size);
m_sign = internal::ZeroSign;

internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);

@@ -468,7 +464,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
*/
template<typename MatrixType, int _UpLo>
template<typename Derived>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
{
const Index size = w.rows();
if (m_isInitialized)

@@ -514,16 +510,21 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
using std::abs;
using std::max;
typedef typename LDLTType::MatrixType MatrixType;
typedef typename LDLTType::Scalar Scalar;
typedef typename LDLTType::RealScalar RealScalar;
const Diagonal<const MatrixType> vectorD = dec().vectorD();
RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
// In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
// as motivated by LAPACK's xGELSS:
// RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
// However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
// diagonal element is not well justified and to numerical issues in some cases.
// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();

for (Index i = 0; i < vectorD.size(); ++i) {
if(abs(vectorD(i)) > tolerance)
dst.row(i) /= vectorD(i);
dst.row(i) /= vectorD(i);
else
dst.row(i).setZero();
dst.row(i).setZero();
}

// dst = L^-T (D^-1 L^-1 P b)

@@ -576,7 +577,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
// L^* P
res = matrixU() * res;
// D(L^*P)
res = vectorD().asDiagonal() * res;
res = vectorD().real().asDiagonal() * res;
// L(DL^*P)
res = matrixL() * res;
// P^T (LDL^*P)
@@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);

protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}

/** \internal
* Used to compute and store L
* The strict upper part is not used and even not initialized.

@@ -283,7 +289,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
return k;
mat.coeffRef(k,k) = x = sqrt(x);
if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
if (rs>0) A21 *= RealScalar(1)/x;
if (rs>0) A21 /= x;
}
return -1;
}

@@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
check_template_parameters();

eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix.resize(size, size);
@@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
lda = m.outerStride(); \
\
info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
info = (info==0) ? Success : NumericalIssue; \
info = (info==0) ? -1 : info>0 ? info-1 : size; \
return info; \
} \
}; \
@@ -78,7 +78,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
{
res.itype = CHOLMOD_INT;
}
else if (internal::is_same<_Index,UF_long>::value)
else if (internal::is_same<_Index,SuiteSparse_long>::value)
{
res.itype = CHOLMOD_LONG;
}

@@ -395,7 +395,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
CholmodSimplicialLLT(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}

~CholmodSimplicialLLT() {}

@@ -442,7 +442,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}

~CholmodSimplicialLDLT() {}

@@ -487,7 +487,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
CholmodSupernodalLLT(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}

~CholmodSupernodalLLT() {}

@@ -534,7 +534,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
CholmodDecomposition(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}

~CholmodDecomposition() {}
@@ -124,6 +124,21 @@ class Array
}
#endif

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
Array(Array&& other)
: Base(std::move(other))
{
Base::_check_template_params();
if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
Base::_set_noalias(other);
}
Array& operator=(Array&& other)
{
other.swap(*this);
return *this;
}
#endif

/** Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
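This hunk adds move construction and move assignment to Array when EIGEN_HAVE_RVALUE_REFERENCES is set (i.e. under C++11). A hedged sketch of what that enables; the sizes and values are arbitrary:

// Sketch only: with the rvalue-reference support added above, returning or
// moving a dynamically sized Array transfers the heap buffer instead of
// deep-copying it. Fixed-size arrays still copy (see _set_noalias above).
#include <Eigen/Core>
#include <utility>

Eigen::ArrayXd make_ramp(int n) {
  Eigen::ArrayXd a(n);
  for (int i = 0; i < n; ++i) a[i] = i;
  return a;                         // moved (or elided), not deep-copied
}

int main() {
  Eigen::ArrayXd a = make_ramp(1 << 20);
  Eigen::ArrayXd b = std::move(a);  // steals a's storage; a is left in a valid but unspecified state
  return b.size() > 0 ? 0 : 1;
}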
@@ -46,9 +46,6 @@ template<typename Derived> class ArrayBase

typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;

using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;

typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;

@@ -56,6 +53,7 @@ template<typename Derived> class ArrayBase
typedef typename NumTraits<Scalar>::Real RealScalar;

typedef DenseBase<Derived> Base;
using Base::operator*;
using Base::RowsAtCompileTime;
using Base::ColsAtCompileTime;
using Base::SizeAtCompileTime;
@@ -29,6 +29,11 @@ struct traits<ArrayWrapper<ExpressionType> >
: public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
typedef ArrayXpr XprKind;
// Let's remove NestByRefBit
enum {
Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
Flags = Flags0 & ~NestByRefBit
};
};
}

@@ -149,6 +154,11 @@ struct traits<MatrixWrapper<ExpressionType> >
: public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
typedef MatrixXpr XprKind;
// Let's remove NestByRefBit
enum {
Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
Flags = Flags0 & ~NestByRefBit
};
};
}
@@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
typedef packet_traits<typename Derived1::Scalar> PacketTraits;
typedef typename Derived1::Scalar Scalar;
typedef packet_traits<Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
alignable = PacketTraits::AlignedOnScalar,
dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
dstAlignment = alignable ? Aligned : int(dstIsAligned),
srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
};
const Scalar *dst_ptr = &dst.coeffRef(0,0);
if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0)
{
// the pointer is not aligned on scalar, so alignment is not possible
return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
}
const Index packetAlignedMask = packetSize - 1;
const Index innerSize = dst.innerSize();
const Index outerSize = dst.outerSize();
const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
: internal::first_aligned(&dst.coeffRef(0,0), innerSize);
Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);

for(Index outer = 0; outer < outerSize; ++outer)
{
@@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
: int(traits<XprType>::MaxColsAtCompileTime),
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
IsDense = is_same<StorageKind,Dense>::value,
IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: XprTypeIsRowMajor,
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),

@@ -81,7 +82,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
&& (InnerStrideAtCompileTime == 1)
? PacketAccessBit : 0,
MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
@@ -43,6 +43,17 @@ struct CommaInitializer
m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}

/* Copy/Move constructor which transfers ownership. This is crucial in
* absence of return value optimization to avoid assertions during destruction. */
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
inline CommaInitializer(const CommaInitializer& o)
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
// Mark original object as finished. In absence of R-value references we need to const_cast:
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
}

/* inserts a scalar value in the target matrix */
CommaInitializer& operator,(const Scalar& s)
{
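The new copy constructor marks the source initializer as finished, so passing a comma-initializer through a helper (where return value optimization may not kick in) no longer trips the "matrix not fully filled" assertion in the destructor. For context, the ordinary usage this struct implements (hedged sketch, not part of the diff):

// Sketch only: the comma-initializer API backed by the struct above. Each
// operator,() call appends one coefficient; the destructor asserts that the
// matrix was completely filled.
#include <Eigen/Core>

int main() {
  Eigen::Matrix2d m;
  m << 1, 2,     // operator<< returns a CommaInitializer
       3, 4;     // operator, fills the remaining coefficients
  Eigen::Vector3d v;
  v << 1, 2, 3;
  return (m(1, 0) == 3.0 && v(2) == 3.0) ? 0 : 1;
}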
@@ -81,7 +81,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
)
),
Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
Cost0 = EIGEN_ADD_COST(LhsCoeffReadCost,RhsCoeffReadCost),
CoeffReadCost = EIGEN_ADD_COST(Cost0,functor_traits<BinaryOp>::Cost)
};
};
} // end namespace internal
@@ -47,7 +47,7 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
Flags = _XprTypeNested::Flags & (
HereditaryBits | LinearAccessBit | AlignedBit
| (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
CoeffReadCost = EIGEN_ADD_COST(_XprTypeNested::CoeffReadCost, functor_traits<UnaryOp>::Cost)
};
};
}
@@ -38,7 +38,7 @@ struct traits<CwiseUnaryView<ViewOp, MatrixType> >
typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
enum {
Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)),
CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits<ViewOp>::Cost,
CoeffReadCost = EIGEN_ADD_COST(traits<_MatrixTypeNested>::CoeffReadCost, functor_traits<ViewOp>::Cost),
MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
// need to cast the sizeof's from size_t to int explicitly, otherwise:
// "error: no integral type can represent all of the enumerator values
@@ -40,15 +40,14 @@ static inline void check_DenseIndex_is_signed() {
*/
template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
: public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
: public internal::special_scalar_op_base<Derived, typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
DenseCoeffsBase<Derived> >
#else
: public DenseCoeffsBase<Derived>
#endif // not EIGEN_PARSED_BY_DOXYGEN
{
public:
using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;

class InnerIterator;

@@ -63,8 +62,9 @@ template<typename Derived> class DenseBase
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef internal::special_scalar_op_base<Derived,Scalar,RealScalar, DenseCoeffsBase<Derived> > Base;

typedef DenseCoeffsBase<Derived> Base;
using Base::operator*;
using Base::derived;
using Base::const_cast_derived;
using Base::rows;

@@ -183,10 +183,6 @@ template<typename Derived> class DenseBase
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
inline Index nonZeros() const { return size(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
* \sa rows(), cols(), IsVectorAtCompileTime. */

/** \returns the outer size.
*

@@ -266,11 +262,13 @@ template<typename Derived> class DenseBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& func);

#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Copies \a other into *this without evaluating other. \returns a reference to *this. */
/** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
template<typename OtherDerived>
Derived& lazyAssign(const DenseBase<OtherDerived>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN

/** \internal Evaluates \a other into *this. \returns a reference to *this. */
template<typename OtherDerived>
Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);

CommaInitializer<Derived> operator<< (const Scalar& s);

@@ -462,8 +460,10 @@ template<typename Derived> class DenseBase
template<int p> RealScalar lpNorm() const;

template<int RowFactor, int ColFactor>
const Replicate<Derived,RowFactor,ColFactor> replicate() const;
const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFacor,Index colFactor) const;
inline const Replicate<Derived,RowFactor,ColFactor> replicate() const;

typedef Replicate<Derived,Dynamic,Dynamic> ReplicateReturnType;
inline const ReplicateReturnType replicate(Index rowFacor,Index colFactor) const;

typedef Reverse<Derived, BothDirections> ReverseReturnType;
typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
@@ -24,6 +24,14 @@ namespace internal {

struct constructor_without_unaligned_array_assert {};

template<typename T, int Size> void check_static_allocation_size()
{
// if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
#if EIGEN_STACK_ALLOCATION_LIMIT
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
#endif
}

/** \internal
* Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
* to 16 bytes boundary if the total size is a multiple of 16 bytes.

@@ -38,12 +46,12 @@ struct plain_array

plain_array()
{
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
check_static_allocation_size<T,Size>();
}

plain_array(constructor_without_unaligned_array_assert)
{
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
check_static_allocation_size<T,Size>();
}
};

@@ -76,12 +84,12 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 16>
plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
check_static_allocation_size<T,Size>();
}

plain_array(constructor_without_unaligned_array_assert)
{
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
check_static_allocation_size<T,Size>();
}
};
@@ -114,33 +122,41 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
{
internal::plain_array<T,Size,_Options> m_data;
public:
inline DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
static inline DenseIndex rows(void) {return _Rows;}
static inline DenseIndex cols(void) {return _Cols;}
inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseStorage(const DenseStorage& other) : m_data(other.m_data) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other) m_data = other.m_data;
return *this;
}
DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
static DenseIndex rows(void) {return _Rows;}
static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
void resize(DenseIndex,DenseIndex,DenseIndex) {}
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};

// null matrix
template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
{
public:
inline DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(DenseStorage& ) {}
static inline DenseIndex rows(void) {return _Rows;}
static inline DenseIndex cols(void) {return _Cols;}
inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
inline const T *data() const { return 0; }
inline T *data() { return 0; }
DenseStorage() {}
DenseStorage(internal::constructor_without_unaligned_array_assert) {}
DenseStorage(const DenseStorage&) {}
DenseStorage& operator=(const DenseStorage&) { return *this; }
DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
void swap(DenseStorage& ) {}
static DenseIndex rows(void) {return _Rows;}
static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
void resize(DenseIndex,DenseIndex,DenseIndex) {}
const T *data() const { return 0; }
T *data() { return 0; }
};

// more specializations for null matrices; these are necessary to resolve ambiguities
@@ -160,18 +176,29 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
DenseIndex m_rows;
DenseIndex m_cols;
public:
inline DenseStorage() : m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_rows(0), m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
inline void swap(DenseStorage& other)
DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_rows = other.m_rows;
m_cols = other.m_cols;
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows() const {return m_rows;}
inline DenseIndex cols() const {return m_cols;}
inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseIndex rows() const {return m_rows;}
DenseIndex cols() const {return m_cols;}
void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};

// dynamic-size matrix with fixed-size storage and fixed width
@@ -180,17 +207,27 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows;
public:
inline DenseStorage() : m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_rows(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline DenseIndex rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return _Cols;}
inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_rows = other.m_rows;
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
DenseIndex rows(void) const {return m_rows;}
DenseIndex cols(void) const {return _Cols;}
void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};

// dynamic-size matrix with fixed-size storage and fixed height
@@ -199,17 +236,27 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_cols;
public:
inline DenseStorage() : m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return _Rows;}
inline DenseIndex cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_cols = other.m_cols;
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
DenseIndex rows(void) const {return _Rows;}
DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};

// purely dynamic matrix.
@@ -219,18 +266,35 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
DenseIndex m_rows;
DenseIndex m_cols;
public:
inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
inline void swap(DenseStorage& other)
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
DenseStorage(DenseStorage&& other)
: m_data(std::move(other.m_data))
, m_rows(std::move(other.m_rows))
, m_cols(std::move(other.m_cols))
{
other.m_data = nullptr;
}
DenseStorage& operator=(DenseStorage&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_rows, other.m_rows);
swap(m_cols, other.m_cols);
return *this;
}
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
DenseIndex rows(void) const {return m_rows;}
DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
m_rows = nbRows;
@@ -250,8 +314,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
m_rows = nbRows;
m_cols = nbCols;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
const T *data() const { return m_data; }
T *data() { return m_data; }
private:
DenseStorage(const DenseStorage&);
DenseStorage& operator=(const DenseStorage&);
};

// matrix with dynamic width and fixed height (so that matrix has dynamic size).
@@ -260,15 +327,30 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
T *m_data;
DenseIndex m_cols;
public:
inline DenseStorage() : m_data(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
DenseStorage() : m_data(0), m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
static inline DenseIndex rows(void) {return _Rows;}
inline DenseIndex cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
DenseStorage(DenseStorage&& other)
: m_data(std::move(other.m_data))
, m_cols(std::move(other.m_cols))
{
other.m_data = nullptr;
}
DenseStorage& operator=(DenseStorage&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_cols, other.m_cols);
return *this;
}
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
static DenseIndex rows(void) {return _Rows;}
DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
m_cols = nbCols;
@@ -286,8 +368,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
}
m_cols = nbCols;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
const T *data() const { return m_data; }
T *data() { return m_data; }
private:
DenseStorage(const DenseStorage&);
DenseStorage& operator=(const DenseStorage&);
};

// matrix with dynamic height and fixed width (so that matrix has dynamic size).
@@ -296,15 +381,30 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
T *m_data;
DenseIndex m_rows;
public:
inline DenseStorage() : m_data(0), m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
DenseStorage() : m_data(0), m_rows(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline DenseIndex rows(void) const {return m_rows;}
static inline DenseIndex cols(void) {return _Cols;}
inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
DenseStorage(DenseStorage&& other)
: m_data(std::move(other.m_data))
, m_rows(std::move(other.m_rows))
{
other.m_data = nullptr;
}
DenseStorage& operator=(DenseStorage&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_rows, other.m_rows);
return *this;
}
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
DenseIndex rows(void) const {return m_rows;}
static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
m_rows = nbRows;
@@ -322,8 +422,11 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
}
m_rows = nbRows;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
const T *data() const { return m_data; }
T *data() { return m_data; }
private:
DenseStorage(const DenseStorage&);
DenseStorage& operator=(const DenseStorage&);
};

} // end namespace Eigen
@@ -190,18 +190,18 @@ MatrixBase<Derived>::diagonal() const
*
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index)
{
return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
return DiagonalDynamicIndexReturnType(derived(), index);
}

/** This is the const version of diagonal(Index). */
template<typename Derived>
inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index) const
{
return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
return ConstDiagonalDynamicIndexReturnType(derived(), index);
}

/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
@@ -34,8 +34,9 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
_LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,

Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
Cost0 = EIGEN_ADD_COST(NumTraits<Scalar>::MulCost, MatrixType::CoeffReadCost),
CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost)
};
};
}
@@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
};
};

/** \internal
* \brief Template functors for comparison of two scalars
* \todo Implement packet-comparisons
*/
template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;

template<typename Scalar, ComparisonName cmp>
struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
enum {
Cost = NumTraits<Scalar>::AddCost,
PacketAccess = false
};
};

template<ComparisonName Cmp, typename Scalar>
struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
typedef bool type;
};

template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
};

// unary functors:

/** \internal
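The scalar_cmp_op functors added above are internal comparison kernels; cmp_UNORD is true only when the operands cannot be ordered (for example, when a NaN is involved). At the user level the same kind of coefficient-wise comparison is exposed through Array expressions and select(); a sketch, not tied to any particular functor:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXd x(4), y(4);
  x << 1, 2, 3, 4;
  y << 4, 3, 2, 1;
  std::cout << (x < y).transpose() << "\n";               // 1 1 0 0
  std::cout << (x == y).transpose() << "\n";              // 0 0 0 0
  std::cout << (x < y).select(x, y).transpose() << "\n";  // coefficient-wise min: 1 2 2 1
  return 0;
}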
@@ -589,7 +630,7 @@ struct linspaced_op_impl<Scalar,true>

template<typename Index>
EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
{ return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
{ return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }

const Scalar m_low;
const Scalar m_step;

@@ -609,7 +650,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
template <typename Scalar, bool RandomAccess> struct linspaced_op
{
typedef typename packet_traits<Scalar>::type Packet;
linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}

template<typename Index>
EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
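The two linspaced_op changes above only add explicit Scalar(...) casts around the index and step computation; the user-facing LinSpaced factory behaves as before. A short sketch, assuming nothing beyond the bundled Eigen headers:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // 5 evenly spaced values from 0 to 1: 0 0.25 0.5 0.75 1
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
  std::cout << v.transpose() << "\n";
  // The degenerate one-step case returns the high bound, matching the constructor above.
  std::cout << Eigen::VectorXd::LinSpaced(1, 0.0, 1.0) << "\n";  // 1
  return 0;
}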
@@ -232,7 +232,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if rhs is real and lhs complex while alpha is real too
const Index cols = dest.cols();
for (Index j=0; j<cols; ++j)
func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
func(dest.col(j), prod.rhs().coeff(0,j) * prod.lhs());
}

// Row major

@@ -243,7 +243,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if lhs is real and rhs complex while alpha is real too
const Index rows = dest.rows();
for (Index i=0; i<rows; ++i)
func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
func(dest.row(i), prod.lhs().coeff(i,0) * prod.rhs());
}

template<typename Lhs, typename Rhs>
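The outer-product fix above replaces 1D coeff(j) access with explicit 2D coeff(0,j)/coeff(i,0) access on the factors; the column-major loop still builds the result one column at a time, each column being rhs(j) times lhs. Equivalent user-level code (a sketch, not from the diff):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector3d u(1, 2, 3);
  Eigen::Vector2d v(10, 20);
  Eigen::MatrixXd m = u * v.transpose();  // 3x2 outer product; column j equals v(j) * u
  std::cout << m << "\n";
  return 0;
}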
@@ -257,7 +257,7 @@ template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, OuterProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
{
template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};

public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

@@ -281,22 +281,22 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>

template<typename Dest>
inline void evalTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, set(), is_row_major<Dest>());
}

template<typename Dest>
inline void addTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, add(), is_row_major<Dest>());
}

template<typename Dest>
inline void subTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, sub(), is_row_major<Dest>());
}

template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major<Dest>());
}
};
@@ -425,15 +425,18 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
* RhsBlasTraits::extractScalarFactor(prod.rhs());

// make sure Dest is a compile-time vector type (bug 1166)
typedef typename conditional<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr>::type ActualDest;

enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
// on, the other hand it is good for the cache to pack the vector anyways...
EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
MightCannotUseDest = (ActualDest::InnerStrideAtCompileTime!=1) || ComplexByReal
};

gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
gemv_static_vector_if<ResScalar,ActualDest::SizeAtCompileTime,ActualDest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;

bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;

@@ -522,7 +525,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
actualLhs.rows(), actualLhs.cols(),
actualLhs.data(), actualLhs.outerStride(),
actualRhsPtr, 1,
dest.data(), dest.innerStride(),
dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166)
actualAlpha);
}
};
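Both gemv hunks above relate to the bug 1166 note in the diff: the destination of a matrix*vector product need not be a vector at compile time, so the selector now reasons about ActualDest (a ColXpr when needed) and takes the inner stride from dest.col(0). A destination of that shape, as a sketch:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(4, 4);
  Eigen::VectorXd x = Eigen::VectorXd::Random(4);
  Eigen::MatrixXd dest(4, 1);   // one column at runtime, but not a vector at compile time
  dest.noalias() = a * x;       // still lands in the matrix-vector product path
  return 0;
}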
@@ -123,7 +123,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
}

inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
checkSanity();

@@ -149,6 +149,10 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
checkSanity();
}

#ifdef EIGEN_MAPBASE_PLUGIN
#include EIGEN_MAPBASE_PLUGIN
#endif

protected:

void checkSanity() const

@@ -157,7 +161,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
&& "data is not aligned");
&& "input pointer is not aligned on a 16 byte boundary");
}

PointerType m_data;

@@ -168,6 +172,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
template<typename Derived> class MapBase<Derived, WriteAccessors>
: public MapBase<Derived, ReadOnlyAccessors>
{
typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
public:

typedef MapBase<Derived, ReadOnlyAccessors> Base;

@@ -230,13 +235,17 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>

Derived& operator=(const MapBase& other)
{
Base::Base::operator=(other);
ReadOnlyMapBase::Base::operator=(other);
return derived();
}

using Base::Base::operator=;
// In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base,
// see bugs 821 and 920.
using ReadOnlyMapBase::Base::operator=;
};

#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS

} // end namespace Eigen

#endif // EIGEN_MAPBASE_H
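The checkSanity() change above only rewords the alignment assertion; the requirement itself is unchanged: a Map declared as Aligned must be given a 16-byte aligned pointer. A sketch of both cases (assuming a debug build so eigen_assert is active):

#include <Eigen/Dense>

int main() {
  float raw[6] = {1, 2, 3, 4, 5, 6};
  Eigen::Map<Eigen::VectorXf> v(raw, 6);                   // unaligned map: no alignment requirement
  EIGEN_ALIGN16 float raw16[4] = {1, 2, 3, 4};
  Eigen::Map<Eigen::Vector4f, Eigen::Aligned> va(raw16);   // must be 16-byte aligned
  return int(v.sum() + va.sum());
}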
@@ -294,7 +294,7 @@ struct hypot_impl
RealScalar _x = abs(x);
RealScalar _y = abs(y);
RealScalar p = (max)(_x, _y);
if(p==RealScalar(0)) return 0;
if(p==RealScalar(0)) return RealScalar(0);
RealScalar q = (min)(_x, _y);
RealScalar qp = q/p;
return p * sqrt(RealScalar(1) + qp*qp);
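The hypot hunk only changes the zero case to return the proper RealScalar type; the underlying algorithm is the usual overflow-safe rearrangement p*sqrt(1 + (q/p)^2) with p = max(|x|,|y|) and q = min(|x|,|y|). A standalone sketch of that rearrangement (not the Eigen implementation itself):

#include <algorithm>
#include <cassert>
#include <cmath>

double safe_hypot(double x, double y) {
  double ax = std::abs(x), ay = std::abs(y);
  double p = std::max(ax, ay);
  if (p == 0.0) return 0.0;            // both inputs are zero
  double qp = std::min(ax, ay) / p;    // qp <= 1, so qp*qp cannot overflow
  return p * std::sqrt(1.0 + qp * qp);
}

int main() {
  assert(safe_hypot(3.0, 4.0) == 5.0);
  double big = 1e200;                  // naive sqrt(x*x + y*y) would overflow here
  assert(safe_hypot(big, big) > big);
  return 0;
}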
@@ -707,21 +707,21 @@ struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::

template<typename Scalar, typename OtherScalar>
inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
{
return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
}

template<typename Scalar>
inline bool isApprox(const Scalar& x, const Scalar& y,
typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
{
return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
}

template<typename Scalar>
inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
{
return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
}
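The fuzzy-comparison hunk above only changes the precision parameter to be passed by const reference; the semantics are unchanged, and these internal helpers are what the public isApprox()/isMuchSmallerThan() members forward to. A usage sketch:

#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::Vector3d a(1.0, 2.0, 3.0);
  Eigen::Vector3d b = a + Eigen::Vector3d::Constant(1e-12);
  assert(a.isApprox(b));           // within the default dummy_precision
  assert(!a.isApprox(b, 1e-16));   // fails once the tolerance is tightened
  return 0;
}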
@@ -211,6 +211,21 @@ class Matrix
: Base(internal::constructor_without_unaligned_array_assert())
{ Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
Matrix(Matrix&& other)
: Base(std::move(other))
{
Base::_check_template_params();
if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
Base::_set_noalias(other);
}
Matrix& operator=(Matrix&& other)
{
other.swap(*this);
return *this;
}
#endif

/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
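The Matrix hunk adds move construction and move assignment when the compiler supports rvalue references (EIGEN_HAVE_RVALUE_REFERENCES). For dynamic-size matrices this transfers the heap buffer instead of copying it. A sketch assuming a C++11 compiler:

#include <Eigen/Dense>
#include <utility>

Eigen::MatrixXd make_identity(int n) {
  Eigen::MatrixXd m = Eigen::MatrixXd::Identity(n, n);
  return m;                          // eligible for move (or elision)
}

int main() {
  Eigen::MatrixXd a = make_identity(1000);
  Eigen::MatrixXd b = std::move(a);  // steals a's storage; a is left valid but unspecified
  return int(b(0, 0));
}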
@@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);

#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);

template<typename MatrixPower, typename Lhs, typename Rhs>
Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN

template<typename OtherDerived>
Derived& operator+=(const MatrixBase<OtherDerived>& other);

@@ -215,7 +213,7 @@ template<typename Derived> class MatrixBase

typedef Diagonal<Derived> DiagonalReturnType;
DiagonalReturnType diagonal();
typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
ConstDiagonalReturnType diagonal() const;

template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };

@@ -223,16 +221,12 @@ template<typename Derived> class MatrixBase

template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;

typedef Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType;
typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType;

// Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
// On the other hand they confuse MSVC8...
#if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
#else
typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
#endif
DiagonalDynamicIndexReturnType diagonal(Index index);
ConstDiagonalDynamicIndexReturnType diagonal(Index index) const;

#ifdef EIGEN2_SUPPORT
template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();

@@ -446,6 +440,15 @@ template<typename Derived> class MatrixBase
template<typename OtherScalar>
void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);

///////// SparseCore module /////////

template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const
{
return other.cwiseProduct(derived());
}

///////// MatrixFunctions module /////////

typedef typename internal::stem_function<Scalar>::type StemFunction;
@@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
template<typename Other> friend
inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
{ return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }

/** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
*
* This function is O(\c n) procedure allocating a buffer of \c n booleans.
*/
Index determinant() const
{
Index res = 1;
Index n = size();
Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
mask.fill(false);
Index r = 0;
while(r < n)
{
// search for the next seed
while(r<n && mask[r]) r++;
if(r>=n)
break;
// we got one, let's follow it until we are back to the seed
Index k0 = r++;
mask.coeffRef(k0) = true;
for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
{
mask.coeffRef(k) = true;
res = -res;
}
}
return res;
}

protected:
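The determinant() method added above computes the permutation's sign in O(n) by walking its cycles: every step of a cycle beyond its seed flips the sign once, so a cycle of length L contributes (-1)^(L-1) and a single transposition contributes -1. A small check of that behaviour (determinant() is available in the updated Eigen shipped by this commit):

#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::PermutationMatrix<4> p;
  p.setIdentity();
  p.applyTranspositionOnTheRight(0, 1);  // one transposition: odd permutation
  assert(p.determinant() == -1);
  p.applyTranspositionOnTheRight(2, 3);  // two transpositions: even permutation
  assert(p.determinant() == 1);
  return 0;
}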
@@ -555,7 +584,10 @@ struct permut_matrix_product_retval
const Index n = Side==OnTheLeft ? rows() : cols();
// FIXME we need an is_same for expression that is not sensitive to constness. For instance
// is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
if( is_same<MatrixTypeNestedCleaned,Dest>::value
&& blas_traits<MatrixTypeNestedCleaned>::HasUsableDirectAccess
&& blas_traits<Dest>::HasUsableDirectAccess
&& extract_data(dst) == extract_data(m_matrix))
{
// apply the permutation inplace
Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
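The permut_matrix_product_retval hunk tightens the in-place shortcut: the permutation is applied in place only when source and destination are the same kind of expression, both have usable direct access, and they actually alias. The aliasing situation it guards looks like this at the user level (a sketch):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::PermutationMatrix<3> p;
  p.indices() << 2, 0, 1;
  Eigen::Matrix3d a;
  a << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  Eigen::Matrix3d rows_permuted = p * a;  // permutation on the left permutes rows
  Eigen::Matrix3d cols_permuted = a * p;  // permutation on the right permutes columns
  a = p * a;                              // destination aliases the source: the in-place path
  std::cout << rows_permuted << "\n" << cols_permuted << "\n" << a << "\n";
  return 0;
}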
@@ -437,6 +437,36 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
}
#endif

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
PlainObjectBase(PlainObjectBase&& other)
: m_storage( std::move(other.m_storage) )
{
}

PlainObjectBase& operator=(PlainObjectBase&& other)
{
using std::swap;
swap(m_storage, other.m_storage);
return *this;
}
#endif

/** Copy constructor */
EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other)
: m_storage()
{
_check_template_params();
lazyAssign(other);
}

template<typename OtherDerived>
EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)
: m_storage()
{
_check_template_params();
lazyAssign(other);
}

EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
: m_storage(a_size, nbRows, nbCols)
{
@@ -573,6 +603,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
: (rows() == other.rows() && cols() == other.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
EIGEN_ONLY_USED_FOR_DEBUG(other);
if(this->size()==0)
resizeLike(other);
#else
resizeLike(other);
#endif
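The resizeLike hunk relaxes EIGEN_NO_AUTOMATIC_RESIZING slightly: an empty destination may still be resized. Without that macro, assignment resizes dynamic-size objects automatically, as sketched below:

#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::MatrixXd a(2, 2);
  Eigen::MatrixXd b = Eigen::MatrixXd::Ones(5, 3);
  a = b;                               // a is automatically resized to 5x3
  assert(a.rows() == 5 && a.cols() == 3);
  // With EIGEN_NO_AUTOMATIC_RESIZING defined, such a size mismatch asserts instead,
  // unless the destination is empty (size() == 0), per the added check above.
  return 0;
}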
@@ -85,7 +85,14 @@ class ProductBase : public MatrixBase<Derived>

public:

#ifndef EIGEN_NO_MALLOC
typedef typename Base::PlainObject BasePlainObject;
typedef Matrix<Scalar,RowsAtCompileTime==1?1:Dynamic,ColsAtCompileTime==1?1:Dynamic,BasePlainObject::Options> DynPlainObject;
typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)),
BasePlainObject, DynPlainObject>::type PlainObject;
#else
typedef typename Base::PlainObject PlainObject;
#endif

ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
: m_lhs(a_lhs), m_rhs(a_rhs)

@@ -180,7 +187,12 @@ namespace internal {
template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
{
typedef PlainObject const& type;
typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
};
template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
struct nested<const GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
{
typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
};
}
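The ProductBase/nested hunks change which PlainObject type a nested product is evaluated into, switching large fixed-size temporaries to heap-allocated ones when they would exceed EIGEN_STACK_ALLOCATION_LIMIT (and malloc is permitted). The visible behaviour stays the same: a product nested in a larger expression is evaluated into a temporary first, unless noalias() is used. A sketch:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::VectorXd x = Eigen::VectorXd::Random(3);
  Eigen::VectorXd y = Eigen::VectorXd::Random(3);
  Eigen::VectorXd r = y + a * x;  // a*x is evaluated into a temporary, then added
  r.noalias() = a * x;            // writes the product directly into r, no extra temporary
  return int(r.size());
}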