Eigen-unsupported 3.2.5
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_MATRIX_LOGARITHM
#define EIGEN_MATRIX_LOGARITHM

#ifndef M_PI
#define M_PI 3.141592653589793238462643383279503L
#endif

namespace Eigen {

/** \brief Helper class for computing the matrix logarithm of atomic matrices,
  * i.e. upper triangular matrices whose diagonal entries are close to each other.
  */
template <typename MatrixType>
class MatrixLogarithmAtomic
{
public:

  typedef typename MatrixType::Scalar Scalar;
  // typedef typename MatrixType::Index Index;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  // typedef typename internal::stem_function<Scalar>::type StemFunction;
  // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;

  /** \brief Constructor. */
  MatrixLogarithmAtomic() { }

  /** \brief Compute matrix logarithm of the atomic (upper triangular) matrix \p A. */
  MatrixType compute(const MatrixType& A);

private:

  void compute2x2(const MatrixType& A, MatrixType& result);
  void computeBig(const MatrixType& A, MatrixType& result);
  int getPadeDegree(float normTminusI);
  int getPadeDegree(double normTminusI);
  int getPadeDegree(long double normTminusI);
  void computePade(MatrixType& result, const MatrixType& T, int degree);
  void computePade3(MatrixType& result, const MatrixType& T);
  void computePade4(MatrixType& result, const MatrixType& T);
  void computePade5(MatrixType& result, const MatrixType& T);
  void computePade6(MatrixType& result, const MatrixType& T);
  void computePade7(MatrixType& result, const MatrixType& T);
  void computePade8(MatrixType& result, const MatrixType& T);
  void computePade9(MatrixType& result, const MatrixType& T);
  void computePade10(MatrixType& result, const MatrixType& T);
  void computePade11(MatrixType& result, const MatrixType& T);

  static const int minPadeDegree = 3;
  static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24?  5:  // single precision
                                   std::numeric_limits<RealScalar>::digits<= 53?  7:  // double precision
                                   std::numeric_limits<RealScalar>::digits<= 64?  8:  // extended precision
                                   std::numeric_limits<RealScalar>::digits<=106? 10:  // double-double
                                                                                 11;  // quadruple precision

  // Prevent copying
  MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
  MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
};

/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
template <typename MatrixType>
MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
{
  using std::log;
  MatrixType result(A.rows(), A.rows());
  if (A.rows() == 1)
    result(0,0) = log(A(0,0));
  else if (A.rows() == 2)
    compute2x2(A, result);
  else
    computeBig(A, result);
  return result;
}

/** \brief Compute logarithm of 2x2 triangular matrix. */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
{
  using std::abs;
  using std::ceil;
  using std::imag;
  using std::log;

  Scalar logA00 = log(A(0,0));
  Scalar logA11 = log(A(1,1));

  result(0,0) = logA00;
  result(1,0) = Scalar(0);
  result(1,1) = logA11;

  if (A(0,0) == A(1,1)) {
    result(0,1) = A(0,1) / A(0,0);
  } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
    result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
  } else {
    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
    int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
    Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
    result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
  }
}

/** \brief Compute logarithm of triangular matrices with size > 2.
  * \details Uses an inverse scaling-and-squaring algorithm. */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
{
  using std::pow;
  int numberOfSquareRoots = 0;
  int numberOfExtraSquareRoots = 0;
  int degree;
  MatrixType T = A, sqrtT;
  const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1:                    // single precision
                                    maxPadeDegree<= 7? 2.6429608311114350e-1:                    // double precision
                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:               // extended precision
                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:   // double-double
                                                       1.1880960220216759245467951592883642e-1L; // quadruple precision

  while (true) {
    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
    if (normTminusI < maxNormForPade) {
      degree = getPadeDegree(normTminusI);
      int degree2 = getPadeDegree(normTminusI / RealScalar(2));
      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
        break;
      ++numberOfExtraSquareRoots;
    }
    MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
    T = sqrtT.template triangularView<Upper>();
    ++numberOfSquareRoots;
  }

  computePade(result, T, degree);
  result *= pow(RealScalar(2), numberOfSquareRoots);
}
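// computeBig() above relies on the identity log(T) = 2^k * log(T^(1/2^k)):
// each triangular square root moves T closer to the identity so that a
// low-degree Pade approximant suffices, and the final multiplication by
// pow(RealScalar(2), numberOfSquareRoots) undoes the repeated square roots.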
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
{
  const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
            5.3149729967117310e-1 };
  int degree = 3;
  for (; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      break;
  return degree;
}

/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
{
  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
            1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
  int degree = 3;
  for (; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      break;
  return degree;
}

/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
{
#if   LDBL_MANT_DIG == 53         // double precision
  const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
            1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
#elif LDBL_MANT_DIG <= 64         // extended precision
  const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
            5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
            2.32777776523703892094e-1L };
#elif LDBL_MANT_DIG <= 106        // double-double
  const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
            9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
            1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
            4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
            1.05026503471351080481093652651105e-1L };
#else                             // quadruple precision
  const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
            5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
            8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
            3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
            8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
#endif
  int degree = 3;
  for (; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      break;
  return degree;
}

/* \brief Compute Pade approximation to matrix logarithm */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
{
  switch (degree) {
    case 3:  computePade3(result, T);  break;
    case 4:  computePade4(result, T);  break;
    case 5:  computePade5(result, T);  break;
    case 6:  computePade6(result, T);  break;
    case 7:  computePade7(result, T);  break;
    case 8:  computePade8(result, T);  break;
    case 9:  computePade9(result, T);  break;
    case 10: computePade10(result, T); break;
    case 11: computePade11(result, T); break;
    default: assert(false); // should never happen
  }
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
{
  const int degree = 3;
  const RealScalar nodes[]   = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
            0.8872983346207416885179265399782400L };
  const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
            0.2777777777777777777777777777777778L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
{
  const int degree = 4;
  const RealScalar nodes[]   = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
            0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
  const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
            0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
{
  const int degree = 5;
  const RealScalar nodes[]   = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
            0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
            0.9530899229693319963988134391496965L };
  const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
            0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
            0.1184634425280945437571320203599587L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
{
  const int degree = 6;
  const RealScalar nodes[]   = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
            0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
            0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
  const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
            0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
            0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
{
  const int degree = 7;
  const RealScalar nodes[]   = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
            0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
            0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
            0.9745539561713792622630948420239256L };
  const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
            0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
            0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
            0.0647424830844348466353057163395410L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
{
  const int degree = 8;
  const RealScalar nodes[]   = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
            0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
            0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
            0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
  const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
            0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
            0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
            0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
{
  const int degree = 9;
  const RealScalar nodes[]   = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
            0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
            0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
            0.8066857163502951986543510196707371L,
            0.9180155536633178971497148940348674L, 0.9840801197538130449177881014518364L };
  const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
            0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
            0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
            0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
            0.0406371941807872059859460790552618L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
{
  const int degree = 10;
  const RealScalar nodes[]   = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
            0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
            0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
            0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
            0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
  const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
            0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
            0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
            0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
            0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
{
  const int degree = 11;
  const RealScalar nodes[]   = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
            0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
            0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
            0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
            0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
            0.9891143290730284964019690005614287L };
  const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
            0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
            0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
            0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
            0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
            0.0278342835580868332413768602212743L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

/** \brief Proxy for the matrix logarithm of some matrix (expression).
  *
  * \tparam Derived  Type of the argument to the matrix function.
  *
  * This class holds the argument to the matrix function until it is assigned or
  * evaluated for some other reason (so the argument should not be changed in the
  * meantime). It is the return type of MatrixBase::log() and most of the time
  * this is the only way it is used.
  */
template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
{
public:

  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Index Index;

  /** \brief Constructor.
    * \param[in] A  %Matrix (expression) forming the argument of the matrix logarithm. */
  MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }

  /** \brief Compute the matrix logarithm.
    * \param[out] result  Logarithm of \p A, where \p A is as specified in the constructor. */
  template <typename ResultType>
  inline void evalTo(ResultType& result) const
  {
    typedef typename Derived::PlainObject PlainObject;
    typedef internal::traits<PlainObject> Traits;
    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
    static const int Options = PlainObject::Options;
    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
    typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
    typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
    AtomicType atomic;

    const PlainObject Aevaluated = m_A.eval();
    MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
    mf.compute(result);
  }

  Index rows() const { return m_A.rows(); }
  Index cols() const { return m_A.cols(); }

private:
  typename internal::nested<Derived>::type m_A;

  MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
};

namespace internal {
  template<typename Derived>
  struct traits<MatrixLogarithmReturnValue<Derived> >
  {
    typedef typename Derived::PlainObject ReturnType;
  };
}


/********** MatrixBase method **********/


template <typename Derived>
const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
{
  eigen_assert(rows() == cols());
  return MatrixLogarithmReturnValue<Derived>(derived());
}

} // end namespace Eigen

#endif // EIGEN_MATRIX_LOGARITHM
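MatrixBase::log(), defined at the end of this file, is the user-facing entry point; it becomes available once the unsupported MatrixFunctions module header (which includes this file) is included. The following minimal usage sketch is not part of the header; the 2x2 test matrix and the round-trip check through exp() are chosen purely for illustration.

#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(2, 2);
  A << 2, 1,
       0, 3;                        // upper triangular test matrix with positive eigenvalues

  Eigen::MatrixXd logA = A.log();   // evaluates the MatrixLogarithmReturnValue proxy
  std::cout << "log(A) =\n" << logA << "\n";

  // Round trip: exp(log(A)) should reproduce A up to rounding error.
  std::cout << "exp(log(A)) =\n" << logA.exp() << "\n";
  return 0;
}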