// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

#include <atomic>

namespace Eigen {

namespace internal {

/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}
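// Illustrative use of the get/set protocol above (a sketch, not called
// anywhere in this header). Per the code: a stored value <= 0 makes
// GetAction fall back to omp_get_max_threads().
//   int n = 4;
//   manage_multi_threading(SetAction, &n);      // cap Eigen at 4 threads
//   manage_multi_threading(GetAction, &n);      // n == 4
//   int reset = -1;
//   manage_multi_threading(SetAction, &reset);  // restore the OpenMP default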

} // end namespace internal

/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}
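
// A minimal caller-side sketch of the public API above (illustrative;
// MatrixXd and the matrices A, B are assumptions, not part of this header):
//   Eigen::initParallel();      // once, before spawning user threads
//   Eigen::setNbThreads(4);     // let Eigen use at most 4 threads
//   Eigen::MatrixXd C = A * B;  // a large GEMM may now run in parallel
//   Eigen::setNbThreads(0);     // any value <= 0 restores the OpenMP default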

namespace internal {

/** \internal Per-thread bookkeeping shared between GEMM worker threads:
  * the slice of the lhs this thread is responsible for packing, plus
  * atomic counters used to synchronize access to the packed blocks. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  std::atomic<Index> sync;
  std::atomic<int> users;

  Index lhs_start;
  Index lhs_length;
};

template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
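
  // Worked example of the two caps above (illustrative numbers): for a
  // 1000 x 1000 x 1000 column-major product with nr == 4, the size-based
  // cap is 1000/4 = 250 threads, while work = 1e9 gives a work-based cap
  // of 1e9 / 5e4 = 20000; here the size heuristic is the binding one.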

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of requested ones.
    Index actual_threads = omp_get_num_threads();

    // Round the per-thread block sizes down to a multiple of 4 columns and
    // of Traits::mr rows, so each thread works on kernel-friendly blocks.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
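
  // Partitioning example (illustrative, assuming mr == 8): with
  // rows == cols == 1000 and 4 threads, blockCols = (1000/4) & ~3 = 248
  // and blockRows = (250/8)*8 = 248; threads 0..2 each take 248 rows and
  // columns, and the last thread takes the 256-row/column remainder.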
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H