/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_FRAMEWORK_TYPED_ALLOCATOR_H_
#define TENSORFLOW_CORE_FRAMEWORK_TYPED_ALLOCATOR_H_

#include <limits>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

class Variant;

// Convenience functions to do typed allocation.  C++ constructors
// and destructors are invoked for complex types if necessary.
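//
// Example usage (a sketch; assumes an Allocator obtained from cpu_allocator()
// and default-constructed AllocationAttributes):
//
//   Allocator* a = cpu_allocator();
//   float* buf =
//       TypedAllocator::Allocate<float>(a, 1024, AllocationAttributes());
//   if (buf != nullptr) {
//     // ... use buf[0 .. 1023] ...
//     TypedAllocator::Deallocate<float>(a, buf, 1024);
//   }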
class TypedAllocator {
 public:
  // May return NULL if the tensor has too many elements to represent in a
  // single allocation.
  template <typename T>
  static T* Allocate(Allocator* raw_allocator, size_t num_elements,
                     const AllocationAttributes& allocation_attr) {
    // TODO(jeff): Do we need to allow clients to pass in alignment
    // requirements?

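    // Refuse the request if sizeof(T) * num_elements would overflow size_t;
    // callers are expected to check for a nullptr result.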
    if (num_elements > (std::numeric_limits<size_t>::max() / sizeof(T))) {
      return nullptr;
    }

    void* p =
        raw_allocator->AllocateRaw(Allocator::kAllocatorAlignment,
                                   sizeof(T) * num_elements, allocation_attr);
    T* typed_p = reinterpret_cast<T*>(p);
    if (typed_p) RunCtor<T>(raw_allocator, typed_p, num_elements);
    return typed_p;
  }

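  // Frees memory returned by Allocate<T>(). `num_elements` should match the
  // count passed to Allocate so that a destructor runs for every element.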
  template <typename T>
  static void Deallocate(Allocator* raw_allocator, T* ptr,
                         size_t num_elements) {
    if (ptr) {
      RunDtor<T>(raw_allocator, ptr, num_elements);
      raw_allocator->DeallocateRaw(ptr);
    }
  }

 private:
  // No constructors or destructors are run for simple types
  template <typename T>
  static void RunCtor(Allocator* raw_allocator, T* p, size_t n) {
    static_assert(is_simple_type<T>::value, "T is not a simple type.");
  }

  template <typename T>
  static void RunDtor(Allocator* raw_allocator, T* p, size_t n) {}

  static void RunVariantCtor(Variant* p, size_t n);

  static void RunVariantDtor(Variant* p, size_t n);
};

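// The specializations below run placement constructors/destructors only when
// the allocator hands out real memory; if AllocatesOpaqueHandle() is true the
// returned pointer is treated as an opaque handle and no element ctors/dtors
// are run here.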
template <>
/* static */
inline void TypedAllocator::RunCtor(Allocator* raw_allocator, tstring* p,
                                    size_t n) {
  if (!raw_allocator->AllocatesOpaqueHandle()) {
    for (size_t i = 0; i < n; ++p, ++i) new (p) tstring();
  }
}

template <>
/* static */
inline void TypedAllocator::RunDtor(Allocator* raw_allocator, tstring* p,
                                    size_t n) {
  if (!raw_allocator->AllocatesOpaqueHandle()) {
    for (size_t i = 0; i < n; ++p, ++i) p->~tstring();
  }
}

template <>
/* static */
inline void TypedAllocator::RunCtor(Allocator* raw_allocator, ResourceHandle* p,
                                    size_t n) {
  if (!raw_allocator->AllocatesOpaqueHandle()) {
    for (size_t i = 0; i < n; ++p, ++i) new (p) ResourceHandle();
  }
}

template <>
/* static */
inline void TypedAllocator::RunDtor(Allocator* raw_allocator, ResourceHandle* p,
                                    size_t n) {
  if (!raw_allocator->AllocatesOpaqueHandle()) {
    for (size_t i = 0; i < n; ++p, ++i) p->~ResourceHandle();
  }
}

template <>
/* static */
inline void TypedAllocator::RunCtor(Allocator* raw_allocator, Variant* p,
                                    size_t n) {
  if (!raw_allocator->AllocatesOpaqueHandle()) {
    RunVariantCtor(p, n);
  }
}

template <>
/* static */
inline void TypedAllocator::RunDtor(Allocator* raw_allocator, Variant* p,
                                    size_t n) {
  if (!raw_allocator->AllocatesOpaqueHandle()) {
    RunVariantDtor(p, n);
  }
}

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_TYPED_ALLOCATOR_H_