/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GEMM_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GEMM_THUNK_H_

// Needed for the std::unordered_map member below.
#include <unordered_map>

#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
namespace gpu {

// This class stores everything that StreamExecutor needs to launch a BLAS
// gemm. It is generated by IrEmitter.
//
// This is thread-compatible.
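//
// Example usage (a minimal, illustrative sketch; the slice, shape, stream,
// and profiler names below are hypothetical placeholders that in practice
// come from BufferAssignment, the HLO instruction, and the surrounding
// GpuExecutable):
//
//   GemmThunk thunk(lhs_slice, rhs_slice, output_slice,
//                   lhs_shape, rhs_shape, output_shape,
//                   /*alpha=*/1.0, /*beta=*/0.0, gemm_hlo,
//                   /*implements_whole_instruction=*/true);
//   TF_RETURN_IF_ERROR(
//       thunk.ExecuteOnStream(buffer_allocations, stream, profiler));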
class GemmThunk : public Thunk {
 public:
  // Constructs a thunk that computes
  // "output = (lhs <dot> rhs) * alpha + output * beta" using BLAS gemm.
  // hlo_instruction is as in Thunk; alpha and beta are compile-time constants.
  GemmThunk(const BufferAllocation::Slice& lhs_buffer,
            const BufferAllocation::Slice& rhs_buffer,
            const BufferAllocation::Slice& output_buffer,
            const Shape& lhs_shape, const Shape& rhs_shape,
            const Shape& output_shape, double alpha, double beta,
            const HloInstruction* hlo_instruction,
            bool implements_whole_instruction);

  GemmThunk(const GemmThunk&) = delete;
  GemmThunk& operator=(const GemmThunk&) = delete;

  // Does the gemm operation for the thunk on "stream", which must be non-null.
  Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
                         se::Stream* stream,
                         HloExecutionProfiler* profiler) override;

  bool WillAutotuneKernel(se::Stream* stream) override {
    // We will autotune this kernel if we don't already have an autotune result
    // for the stream's device.
    return autotune_results_.find(
               stream->parent()->GetDeviceDescription().name()) ==
           autotune_results_.end();
  }

 private:
  const BufferAllocation::Slice lhs_buffer_;
  const BufferAllocation::Slice rhs_buffer_;
  const BufferAllocation::Slice output_buffer_;

  const Shape lhs_shape_;
  const Shape rhs_shape_;
  const Shape output_shape_;

  const double alpha_;
  const double beta_;

  const bool implements_whole_instruction_;

  // Maps device names (StreamExecutor::DeviceDescription::name()) to autotune
  // results.  The map's value is the best algorithm we've found for this thunk
  // on this device, or an error if none of the algorithms worked and we should
  // use the regular gemm without an algorithm.
  //
  // TODO(b/112415150):  Make this thread safe.
  std::unordered_map<string, StatusOr<se::blas::AlgorithmType>>
      autotune_results_;
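  //
  // Example entry (illustrative only; device names come from
  // se::DeviceDescription::name() and vary by GPU):
  //   autotune_results_["Tesla V100-SXM2-16GB"] = se::blas::kDefaultAlgorithm;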
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GEMM_THUNK_H_