/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_TARGET_MACHINE_FEATURES_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_TARGET_MACHINE_FEATURES_H_

#include "absl/container/flat_hash_map.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "tensorflow/compiler/xla/primitive_util.h"

namespace xla {
namespace cpu {

// Abstract interface for classes providing information about the target we're
// compiling for.
class TargetMachineFeatures {
 public:
  static constexpr int kX86AvxVectorByteSize = 32;

  // Input and output tensor buffers must be aligned to this many bytes if we
  // want to call an Eigen backed GEMM or Convolution.
  static constexpr int kEigenExpectedTensorAlignment = 16;

  // Return the vectorization factor, which is the number of bytes of data
  // explicitly vectorized routines will try to process at once.
  virtual int vectorization_factor_in_bytes() const = 0;

  // Return the size of the largest vector register in bytes.  We need to pass
  // in "function" since llvm functions can contain annotations for
  // specializing them to specific micro-architectures (though currently XLA
  // does not use this functionality).
  virtual int vector_register_byte_size(
      const llvm::Function& function) const = 0;

  // Return the number of elements of type `type` that can fit into the largest
  // vector register available.  We need to pass in "function" since llvm
  // functions can contain annotations for specializing them to specific
  // micro-architectures (though currently XLA does not use this functionality).
  virtual int vector_register_num_elements(const llvm::Function& function,
                                           PrimitiveType type) const = 0;

  // Returns the minimum alignment for a buffer of size size_bytes.
  virtual int64 minimum_alignment_for_allocation(int64 size_bytes) const = 0;

  virtual ~TargetMachineFeatures() = default;
};
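
// Illustrative usage sketch (hypothetical caller names, not declared in this
// header): code generation can combine the queries above to pick a vector
// width.  Assuming a target whose widest vector register is 256 bits wide
// (e.g. AVX2), F32 has a 32-bit BitWidth, so:
//
//   int bytes = features.vector_register_byte_size(*llvm_function);   // 32
//   int lanes = features.vector_register_num_elements(*llvm_function,
//                                                     /*type=*/F32);  // 8
//
// where `features` is any TargetMachineFeatures implementation and
// `llvm_function` is the llvm::Function being emitted.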

// Implements the TargetMachineFeatures interface using an llvm::TargetMachine.
class LLVMTargetMachineFeatures : public TargetMachineFeatures {
 public:
  static constexpr int kX86AvxVectorByteSize = 32;

  LLVMTargetMachineFeatures(llvm::TargetMachine* target_machine)
      : target_machine_(target_machine) {}

  int vectorization_factor_in_bytes() const override {
    // Ideally this should be a function of the cache line size (which we can
    // get from llvm::TargetTransformInfo::getCacheLineSize) of the target
    // machine.  Guess a value of 128 bytes for now.
    return 128;
  }

  int vector_register_byte_size(const llvm::Function& function) const override {
    llvm::TargetTransformInfo* tti = GetTargetTransformInfoFor(function);
    return tti->getRegisterBitWidth(/*Vector=*/true) / 8;
  }

  int vector_register_num_elements(const llvm::Function& function,
                                   PrimitiveType type) const override {
    return vector_register_byte_size(function) /
           (primitive_util::BitWidth(type) / 8);
  }

  int64 minimum_alignment_for_allocation(int64 size_bytes) const override;

 private:
  llvm::TargetTransformInfo* GetTargetTransformInfoFor(
      const llvm::Function& function) const;

  // This cache saves us from having to create an llvm::TargetTransformInfo for
  // every call to GetTargetTransformInfoFor (creating a TargetTransformInfo
  // costs one heap allocation on X86).
  //
  // This is mutated from within `GetTargetTransformInfoFor`, which is
  // semantically a getter (and thus `const`), so it is declared mutable.
  // Making this mutable is okay because it has cache semantics.
  mutable absl::flat_hash_map<const llvm::Function*, llvm::TargetTransformInfo>
      target_transform_info_cache_;
  llvm::TargetMachine* target_machine_;
};
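
// Illustrative usage sketch (hypothetical caller names): the wrapper stores
// the raw llvm::TargetMachine pointer without taking ownership, so
// `target_machine` must outlive the features object.
//
//   LLVMTargetMachineFeatures features(target_machine);
//   int vf = features.vectorization_factor_in_bytes();  // currently 128
//   int64 min_align =
//       features.minimum_alignment_for_allocation(/*size_bytes=*/1024);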

}  // namespace cpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_TARGET_MACHINE_FEATURES_H_