/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TARGET_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TARGET_UTIL_H_

#include <string>

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"

namespace xla {
namespace gpu {

// Enumeration of target-specific intrinsics.
enum class TargetIntrinsicID {
  kThreadIdx = 0,
  kThreadIdy,
  kThreadIdz,
  kBlockIdx,
  kBlockIdy,
  kBlockIdz,
  kBarrierId,
  kBlockDimx,
  kBlockDimy,
  kBlockDimz,
};

// Enumeration of target-specific device math functions.
enum class TargetDeviceFunctionID {
  kAtan2 = 0,
  kCos,
  kErfcinv,
  kExp,
  kExpm1,
  kFmod,
  kHypot,
  kLog,
  kLog1p,
  kPow,
  kRound,
  kRsqrt,
  kSin,
  kSqrt,
  kTanh,
};

// Emits IR to call a device function named "callee_name" on the given
// operands. Returns the IR value that represents the return value.
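//
// A minimal usage sketch (illustrative only; "__nv_expf" is assumed here as an
// NVPTX libdevice name, and `input_value`/`builder` stand for an existing
// llvm::Value* and llvm::IRBuilder<>; the actual callee name, operands, and
// attributes depend on the caller and target):
//
//   llvm::CallInst* exp_call = EmitDeviceFunctionCall(
//       "__nv_expf", {input_value}, {F32}, F32,
//       {llvm::Attribute::ReadNone, llvm::Attribute::NoUnwind}, &builder);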
llvm::CallInst* EmitDeviceFunctionCall(
    const std::string& callee_name, absl::Span<llvm::Value* const> operands,
    absl::Span<const PrimitiveType> input_type, PrimitiveType output_type,
    absl::Span<const llvm::Attribute::AttrKind> attributes,
    llvm::IRBuilder<>* b, absl::string_view name = "");

// Emits a call to the specified target intrinsic with the given operands.
// Overloaded intrinsics (for example, "minnum") must include a type in
// overloaded_types for each overloaded type. Typically, overloaded
// intrinsics have only a single overloaded type.
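//
// A minimal usage sketch (illustrative only; `builder` stands for an existing
// llvm::IRBuilder<>). kThreadIdx takes no operands and is not overloaded, so
// both spans are empty:
//
//   llvm::CallInst* thread_id_x = EmitCallToTargetIntrinsic(
//       TargetIntrinsicID::kThreadIdx, {}, {}, &builder);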
llvm::CallInst* EmitCallToTargetIntrinsic(
    TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
    absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b);

// Annotates the given function as a GPU kernel according to the GPU target.
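//
// A minimal usage sketch (illustrative only; `module`, `kernel`, and `builder`
// stand for an existing llvm::Module*, llvm::Function*, and llvm::IRBuilder<>):
//
//   AnnotateFunctionAsGpuKernel(module, kernel, &builder);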
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
                                 llvm::IRBuilder<>* b);

// Returns the name of the device math function that implements `func_id` for
// the given output type on the current GPU target.
std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id,
                                     PrimitiveType output_type,
                                     llvm::IRBuilder<>* b);
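//
// Illustrative use of ObtainDeviceFunctionName (sketch only; `builder` stands
// for an existing llvm::IRBuilder<>, and the returned string is a target
// detail, e.g. something like "__nv_powf" on NVPTX):
//
//   std::string pow_name =
//       ObtainDeviceFunctionName(TargetDeviceFunctionID::kPow, F32, &builder);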

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TARGET_UTIL_H_