1 /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
17
18 #define EIGEN_USE_GPU
19
20 #include <assert.h>
21 #include <stdio.h>
22
23 #include "tensorflow/core/framework/tensor_types.h"
24 #include "tensorflow/core/kernels/gpu_prim.h"
25 #include "tensorflow/core/kernels/multinomial_op.h"
26 #include "tensorflow/core/kernels/random_op.h"
27 #include "tensorflow/core/kernels/reduction_gpu_kernels.cu.h"
28 #include "tensorflow/core/kernels/reduction_ops_common.h"
29 #include "tensorflow/core/lib/random/philox_random.h"
30 #include "tensorflow/core/lib/random/random_distributions.h"
31 #include "tensorflow/core/util/gpu_kernel_helper.h"
32
33 namespace tensorflow {
34
35 namespace functor {
36
37 using GPUDevice = Eigen::GpuDevice;
38
39 // Kernel for Multinomial op. Data is interpreted to have the following shapes:
40 // scores: [B, S, C]; maxima: [B, S]; output: [B, S].
41 template <typename OutputType>
MultinomialKernel(int32 nthreads,const int32 num_classes,const int32 num_samples,const float * __restrict__ scores,const float * __restrict__ maxima,OutputType * __restrict__ output)42 __global__ void MultinomialKernel(int32 nthreads, const int32 num_classes,
43 const int32 num_samples,
44 const float* __restrict__ scores,
45 const float* __restrict__ maxima,
46 OutputType* __restrict__ output) {
47 GPU_1D_KERNEL_LOOP(index, nthreads) {
48 const int maxima_idx = index / num_classes;
49 if (ldg(maxima + maxima_idx) == ldg(scores + index)) {
50 using UnsignedOutputType = typename std::make_unsigned<OutputType>::type;
51 GpuAtomicMax(reinterpret_cast<UnsignedOutputType*>(output + maxima_idx),
52 static_cast<UnsignedOutputType>(index % num_classes));
53 }
54 }
55 }
56
57 template <typename T, typename OutputType>
58 struct MultinomialFunctor<GPUDevice, T, OutputType> {
operator ()tensorflow::functor::MultinomialFunctor59 void operator()(OpKernelContext* ctx, const GPUDevice& d,
60 typename TTypes<T>::ConstMatrix logits,
61 typename TTypes<float>::Flat noises,
62 typename TTypes<float>::Flat scores,
63 typename TTypes<float>::Flat maxima, int batch_size,
64 int num_classes, int num_samples,
65 const random::PhiloxRandom& gen,
66 typename TTypes<OutputType>::Matrix output) {
67 // Uniform, [0, 1).
68 typedef random::UniformDistribution<random::PhiloxRandom, float> Dist;
69 functor::FillPhiloxRandom<GPUDevice, Dist>()(
70 ctx, d, /*key=*/nullptr, /*counter=*/nullptr, gen, noises.data(),
71 noises.size(), Dist());
72
73 Eigen::IndexList<int, int, int> bsc;
74 bsc.set(0, batch_size);
75 bsc.set(1, num_samples);
76 bsc.set(2, num_classes);
77
78 Eigen::IndexList<int, Eigen::type2index<1>, int> boc;
79 boc.set(0, batch_size);
80 boc.set(2, num_classes);
81
82 Eigen::IndexList<Eigen::type2index<1>, int, Eigen::type2index<1>> oso;
83 oso.set(1, num_samples);
84
85 // Calculates "scores = logits - log(-log(noises))"; B*C*S elements.
86 // NOTE: we don't store back to "noises" because having it appear on both
87 // sides is potentially unsafe (e.g. Eigen may use ldg() to load RHS data).
88 // 2e-30 is chosen so as to be small enough to only change 0 -> 2e-30 while
89 // not affect any of the other numbers (smallest is ~1e-7), but not so small
90 // that log(x) == -inf, which is why it needs to be larger than 0 in the
91 // first place.
92 To32Bit(scores).device(d) =
93 To32Bit(logits).reshape(boc).broadcast(oso).template cast<float>() -
94 ((-((To32Bit(noises) + 2e-30f).log())).log());
95
96 // Max-reduce along classes for each (batch, sample).
97 typedef const Eigen::array<TTypes<float>::Tensor::Index, 1>& ReductionAxes;
98 Constants<GPUDevice> constants;
99 gpuprim::Max op;
100 functor::ReduceImpl<float, gpuprim::Max, float*, const float*,
101 ReductionAxes>(
102 /*ctx=*/ctx, /*out=*/maxima.data(), /*in=*/scores.data(), /*in_rank=*/2,
103 /*in_dim0=*/batch_size * num_samples,
104 /*in_dim1=*/num_classes, /*in_dim2=*/1, /*out_rank=*/1,
105 /*reduction_axes=*/constants.kOne, /*Op=*/op);
106
107 // Necessary for atomicMax() inside the kernel.
108 output.device(d) = output.constant(0LL);
109
110 const int32 work_items = batch_size * num_samples * num_classes;
111 GpuLaunchConfig config = GetGpuLaunchConfig(work_items, d);
112 TF_CHECK_OK(GpuLaunchKernel(
113 MultinomialKernel<OutputType>, config.block_count,
114 config.thread_per_block, 0, d.stream(), config.virtual_thread_count,
115 num_classes, num_samples, scores.data(), maxima.data(), output.data()));
116 }
117 };
118
// Explicit instantiation of the GPU functors.
// int32 output indices, one instantiation per supported logits type.
template struct MultinomialFunctor<GPUDevice, Eigen::half, int32>;
template struct MultinomialFunctor<GPUDevice, float, int32>;
template struct MultinomialFunctor<GPUDevice, double, int32>;
template struct MultinomialFunctor<GPUDevice, int32, int32>;
template struct MultinomialFunctor<GPUDevice, int64, int32>;

// int64 output indices, same set of logits types.
template struct MultinomialFunctor<GPUDevice, Eigen::half, int64>;
template struct MultinomialFunctor<GPUDevice, float, int64>;
template struct MultinomialFunctor<GPUDevice, double, int64>;
template struct MultinomialFunctor<GPUDevice, int32, int64>;
template struct MultinomialFunctor<GPUDevice, int64, int64>;
131
132 } // namespace functor
133 } // namespace tensorflow
134
135 #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
136