/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 
16 #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_
17 #define TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_
18 
19 #include <unordered_map>
20 
21 #include "tensorflow/core/framework/allocator.h"
22 
23 #if GOOGLE_CUDA
24 #if GOOGLE_TENSORRT
25 #include "tensorrt/include/NvInfer.h"
26 #endif  // GOOGLE_TENSORRT
27 #endif  // GOOGLE_CUDA
28 
namespace tensorflow {
namespace tensorrt {
// std::align is not supported, so this function mimics its behavior.
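// A minimal sketch of the intended contract (an assumption, based on the
// std::align semantics it is said to mimic): `ptr` and `space` describe a raw
// buffer; on success the returned pointer (and `ptr`) is bumped up to the
// requested `alignment` and `space` is reduced by the adjustment, while on
// failure (an aligned block of `size` bytes does not fit) nullptr is returned
// and `ptr`/`space` are left unchanged.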
void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space);
}  // namespace tensorrt
}  // namespace tensorflow

#if GOOGLE_CUDA
#if GOOGLE_TENSORRT

namespace tensorflow {
namespace tensorrt {

class TRTBaseAllocator : public nvinfer1::IGpuAllocator {
  // Base allocator class so that we can have a virtual destructor.
 public:
  // The Python wrapper is not happy with a pure virtual destructor, so this
  // one is defaulted instead.
  virtual ~TRTBaseAllocator() = default;
};
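// Subclasses of TRTBaseAllocator are meant to be handed to TensorRT so that
// engine memory goes through them rather than through raw cudaMalloc (for
// example via IBuilder::setGpuAllocator / IRuntime::setGpuAllocator, assuming
// the linked TensorRT version exposes those setters).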

class TRTCudaAllocator : public TRTBaseAllocator {
  // Allocator implementation that uses the CUDA allocator directly, for the
  // case where we can't get a device allocator from TF.
 public:
  TRTCudaAllocator() {}
  virtual ~TRTCudaAllocator() {}
  void* allocate(uint64_t size, uint64_t alignment, uint32_t flags) override;
  void free(void* memory) override;
};

class TRTDeviceAllocator : public TRTBaseAllocator {
  // Allocator implementation wrapping TF device allocators.
 public:
  TRTDeviceAllocator(Allocator* allocator);

  // TODO(aaroey): base class doesn't have a virtual destructor, work with
  // Nvidia to fix it.
  virtual ~TRTDeviceAllocator() {
    VLOG(1) << "Destroying allocator attached to " << allocator_->Name();
  }
  void* allocate(uint64_t size, uint64_t alignment, uint32_t flags) override;
  void free(void* memory) override;

 private:
  Allocator* allocator_;

  // Supporting the alignment requested at allocation time requires a map from
  // each returned (aligned) pointer back to the original allocation, so that
  // free() can release it.
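  // (Expected flow, inferred from the comment above rather than from this
  // header: allocate() obtains memory from allocator_, aligns the returned
  // pointer with Align(), and records the aligned -> original mapping here;
  // free() then looks up and releases the original block. The actual behavior
  // is defined in the corresponding .cc file.)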
  std::unordered_map<void*, void*> mem_map_;
};

}  // namespace tensorrt
}  // namespace tensorflow

#endif  // GOOGLE_TENSORRT
#endif  // GOOGLE_CUDA
#endif  // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_