/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_

// This file requires the following include because it uses GpuAtomicMax:
// #include "tensorflow/core/util/gpu_kernel_helper.h"

// Unfortunately we can't add the #include, since it breaks compilation for
// non-GPU targets. This only breaks in clang, which is stricter about
// template code, and GpuAtomicMax is used in a template context.
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"

namespace tensorflow {

class OpKernelContext;

bool DisableSegmentReductionOpDeterminismExceptions();

// The type of SparseSegmentReduction operation whose gradient is being
// computed.
enum class SparseSegmentReductionOperation { kSum, kMean, kSqrtN };

namespace functor {

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
typedef Eigen::GpuDevice GPUDevice;
// Functor for SegmentSumGPUOp, SegmentProdGPUOp, SegmentMaxGPUOp
// and SegmentMinGPUOp.
// output_rows: the number of output segments (unique segment ids in
//                'segment_ids').
// segment_ids_shape: shape of 'segment_ids' tensor.
// segment_ids: unsorted map from input rows to output segment ids at which
//                to perform the segment reduction.
// data_size: size of input data tensor.
// data: input data tensor.
// output: output reshaped to {output_rows, output.size/output_rows}.
template <typename T, typename Index, typename InitialValueF,
          typename ReductionF, typename AtomicReductionF>
struct SegmentReductionFunctor {
  void operator()(OpKernelContext* ctx, const GPUDevice& d,
                  const Index output_rows, const TensorShape& segment_ids_shape,
                  typename TTypes<Index>::ConstFlat segment_ids,
                  const Index data_size, const T* data,
                  typename TTypes<T, 2>::Tensor output);
  static constexpr bool atomic_reduction_is_associative =
      AtomicReductionF::is_associative;
};
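
// Illustrative note (a hedged sketch, not a declaration from this header):
// the op implementations in the corresponding .cu.cc files are expected to
// instantiate this functor with a matching triple of initial value,
// non-atomic reduction, and atomic reduction. For example, a GPU segment sum
// over float data with int32 segment ids would look roughly like
//
//   functor::SegmentReductionFunctor<float, int32, functor::Zero<float>,
//                                    functor::NonAtomicSumOpGpu<float>,
//                                    functor::AtomicSumOpGpu<float>>()(
//       ctx, ctx->eigen_gpu_device(), output_rows, segment_ids.shape(),
//       segment_ids.flat<int32>(), data.NumElements(),
//       data.flat<float>().data(), output.flat_inner_dims<float>());
//
// where ctx, segment_ids, data and output are the usual op inputs/outputs.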

#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM

template <typename Device, typename T, typename Index, typename InitialValueF,
          typename ReductionF>
struct UnsortedSegmentFunctor {
  void operator()(OpKernelContext* ctx, const TensorShape& segment_ids_shape,
                  typename TTypes<Index>::ConstFlat segment_ids,
                  typename TTypes<T, 2>::ConstTensor data,
                  typename TTypes<T, 2>::Tensor output);
};
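
// Hedged usage sketch (illustrative only; the concrete Device and reduction
// functor are chosen at op registration time, not in this header). An
// unsorted segment max on the GPU could be instantiated roughly as
//
//   functor::UnsortedSegmentFunctor<GPUDevice, float, int32,
//                                   functor::Lowest<float>,
//                                   functor::AtomicMaxOpGpu<float>>()(
//       ctx, segment_ids.shape(), segment_ids.flat<int32>(),
//       data.flat_inner_dims<float>(), output.flat_inner_dims<float>());
//
// pairing the Lowest initial value with the atomic max reduction declared
// below.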

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM

// Atomic reduction functors for the gpu.
template <typename T>
struct AtomicSumOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    GpuAtomicAdd(dest, value);
  }
  static constexpr bool is_associative = std::is_integral<T>::value;
};

template <typename T>
struct AtomicProdOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    GpuAtomicMul(dest, value);
  }
  static constexpr bool is_associative = std::is_integral<T>::value;
};

template <typename T>
struct AtomicMaxOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    GpuAtomicMax(dest, value);
  }
  static constexpr bool is_associative = true;
};

template <typename T>
struct AtomicMinOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    GpuAtomicMin(dest, value);
  }
  static constexpr bool is_associative = true;
};
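
// For context, a hedged sketch of how these atomic functors are typically
// applied inside a GPU kernel (ExampleAtomicKernel is hypothetical, not a
// kernel defined by TensorFlow): every thread folds its element into the
// destination row through the functor, so concurrent updates to the same
// segment from different thread blocks remain correct.
//
//   __global__ void ExampleAtomicKernel(const float* data,
//                                       const int32* segment_ids,
//                                       int64 nelems, int64 inner_dim,
//                                       float* output) {
//     AtomicSumOpGpu<float> reduce;
//     for (int64 i = blockIdx.x * blockDim.x + threadIdx.x; i < nelems;
//          i += int64{gridDim.x} * blockDim.x) {
//       const int64 row = segment_ids[i / inner_dim];
//       reduce(output + row * inner_dim + i % inner_dim, data[i]);
//     }
//   }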

// Non-atomic reduction functors for the gpu.
template <typename T>
struct NonAtomicSumOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    *dest += value;
  }
};

template <typename T>
struct NonAtomicProdOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    *dest *= value;
  }
};

template <typename T>
struct NonAtomicMaxOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    *dest = max(*dest, value);
  }
};

template <typename T>
struct NonAtomicMinOpGpu {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest,
                                                        const T& value) {
    *dest = min(*dest, value);
  }
};
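
// The non-atomic variants above perform the same folds as their atomic
// counterparts but assume the destination is owned by a single thread. A
// hedged sketch of the usual pattern (illustrative only; begin, end, data and
// output_ptr are placeholders): a thread accumulates a private partial result
// without atomics and publishes it with a single atomic update.
//
//   float partial = 0.f;                           // thread-private
//   NonAtomicSumOpGpu<float> accumulate;
//   for (int i = begin; i < end; ++i) {
//     accumulate(&partial, data[i]);               // no contention here
//   }
//   AtomicSumOpGpu<float>()(output_ptr, partial);  // one contended write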

#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM

// Initial value functors.
template <typename T>
struct Zero {
  EIGEN_STRONG_INLINE T operator()() const { return T(0); }
};

template <typename T>
struct One {
  EIGEN_STRONG_INLINE T operator()() const { return T(1); }
};

template <typename T>
struct Lowest {
  EIGEN_STRONG_INLINE T operator()() const {
    return Eigen::NumTraits<T>::lowest();
  }
};

template <typename T>
struct Highest {
  EIGEN_STRONG_INLINE T operator()() const {
    return Eigen::NumTraits<T>::highest();
  }
};
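
// A hedged note on how these initial values pair with the reductions declared
// above (a summary of the identities involved, not a statement about any
// particular op registration): Zero<T> is the identity for sum, One<T> for
// product, Lowest<T> for max, and Highest<T> for min, so empty segments come
// out as 0, 1, lowest(T) and highest(T) respectively.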

template <typename T, typename Index, typename SegmentId>
struct SparseSegmentReductionFunctor {
  Status operator()(OpKernelContext* context, bool is_mean, bool is_sqrtn,
                    T default_value, typename TTypes<T, 2>::ConstTensor input,
                    typename TTypes<Index>::ConstVec indices,
                    typename TTypes<SegmentId>::ConstVec segment_ids,
                    typename TTypes<T, 2>::Tensor output);
};
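
// A hedged usage sketch (the real call sites are in the op implementations,
// not in this header): a sparse segment mean over float data with int32
// indices and segment ids would be invoked roughly as
//
//   functor::SparseSegmentReductionFunctor<float, int32, int32> reduction;
//   OP_REQUIRES_OK(context, reduction(context, /*is_mean=*/true,
//                                     /*is_sqrtn=*/false,
//                                     /*default_value=*/0.0f,
//                                     input.flat_inner_dims<float>(),
//                                     indices.vec<int32>(),
//                                     segment_ids.vec<int32>(),
//                                     output.flat_inner_dims<float>()));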

template <class Device, typename T, typename Index, typename SegmentId>
struct SparseSegmentGradFunctor {
  void operator()(OpKernelContext* context,
                  SparseSegmentReductionOperation operation,
                  typename TTypes<T>::ConstMatrix input_flat,
                  typename TTypes<Index>::ConstVec indices_vec,
                  typename TTypes<SegmentId>::ConstVec segment_vec,
                  typename TTypes<T>::Matrix output_flat);
};
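
// A hedged sketch of driving the gradient functor (illustrative only; Device
// stands for the execution device the op was registered for):
//
//   functor::SparseSegmentGradFunctor<Device, float, int32, int32>()(
//       context, SparseSegmentReductionOperation::kMean,
//       input.flat_inner_dims<float>(), indices.vec<int32>(),
//       segment_ids.vec<int32>(), output.flat_inner_dims<float>());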

}  // namespace functor
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_