#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

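/* Specialization for a row-major destination matrix => simple transposition of the product */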
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

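/* Specialization for a col-major destination matrix
 * => the standard product algorithm is the preferred one */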
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index kc = blocking.kc();                 // cache block size along the K direction
    Index mc = std::min(rows,blocking.mc());  // cache block size along the M direction

    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      LhsScalar* blockA = ei_aligned_stack_new(LhsScalar, sizeA);
      RhsScalar* w = ei_aligned_stack_new(RhsScalar, sizeW);
      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);

      // For each horizontal panel of the rhs, and the corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of A'

        // In order to reduce the chance that a thread has to wait for the others,
        // let's start by packing A' for the current thread.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.
        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per B'_j
        for(Index shift=0; shift<threads; ++shift)
        {
          Index j = (tid+shift)%threads;

          // At this point we have to make sure that B'_j has been updated by thread j,
          // i.e., we wait until info[j].sync equals k.
          // Note that there is no need to wait for the part packed by the current thread itself!
          if(shift>0)
            while(info[j].sync!=k) {}

          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }

        // Then keep going as usual with the remaining rows of the lhs
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = std::min(i+mc,rows)-i;

          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }

        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1.
        for(Index j=0; j<threads; ++j)
          #pragma omp atomic
          --(info[j].users);
      }

      ei_aligned_stack_delete(LhsScalar, blockA, kc*mc);
      ei_aligned_stack_delete(RhsScalar, w, sizeW);
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*cols;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      LhsScalar *blockA = blocking.blockA()==0 ? ei_aligned_stack_new(LhsScalar, sizeA) : blocking.blockA();
      RhsScalar *blockB = blocking.blockB()==0 ? ei_aligned_stack_new(RhsScalar, sizeB) : blocking.blockB();
      RhsScalar *blockW = blocking.blockW()==0 ? ei_aligned_stack_new(RhsScalar, sizeW) : blocking.blockW();

      // For each horizontal panel of the rhs, and the corresponding panel of the lhs...
      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = std::min(k2+kc,depth)-k2;

        // We have selected one horizontal panel of the rhs and one vertical panel of the lhs.
        // => Pack the rhs's panel into a sequential chunk of memory (L2 caching).
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel, which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);

        // For each mc x kc block of the lhs's vertical panel...
        for(Index i2=0; i2<rows; i2+=mc)
        {
          const Index actual_mc = std::min(i2+mc,rows)-i2;

          // We pack the lhs's block into a sequential chunk of memory (L1 caching).
          // Note that this block will be read a very high number of times, equal to the number of
          // micro vertical panels of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

          // Everything is packed, we can now call the block * panel kernel:
          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }

      if(blocking.blockA()==0) ei_aligned_stack_delete(LhsScalar, blockA, sizeA);
      if(blocking.blockB()==0) ei_aligned_stack_delete(RhsScalar, blockB, sizeB);
      if(blocking.blockW()==0) ei_aligned_stack_delete(RhsScalar, blockW, sizeW);
    }
  }

};

/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};

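// Functor wrapping a single GEMM call so that it can be run either sequentially
// or through parallelize_gemm: initParallelSession() is expected to be invoked
// once, so that the shared packed-rhs buffer exists before the worker threads
// start; operator() is then invoked per thread with its range of rows and a
// GemmParallelInfo array through which the threads synchronize.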
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
               BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth,
         bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

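/** \internal
  * Helper base class holding the three buffers used by the blocked GEMM:
  * m_blockA receives the packed lhs block (mc x kc), m_blockB the packed rhs
  * panel (kc x nc), and m_blockW is scratch workspace for the gebp kernel.
  * Allocation and ownership are left to the derived gemm_blocking_space classes.
  */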
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};
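// Design note: when all dimensions are fixed and small at compile time, the
// three buffers are plain data members (typically living on the stack), so no
// heap allocation occurs and the allocate*() functions are no-ops.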

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      computeProductBlockingSizes<LhsScalar,RhsScalar>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc * Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};

} // end namespace internal

template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    typedef typename Lhs::Scalar LhsScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Scalar ResScalar;

    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
    }

    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

      const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
      const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);

      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

      typedef internal::gemm_functor<
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
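// Usage sketch (hypothetical user-level code): a large run-time sized product
// such as
//   MatrixXf A(m,k), B(k,n), C(m,n);
//   C.noalias() += alpha * A * B;
// selects the GemmProduct path and is evaluated by scaleAndAddTo() above, with
// the scalar factor extracted from the expression by the blas traits.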

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H