• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow/core/framework/allocator.h"
17 
18 #include <atomic>
19 
20 #include "tensorflow/core/framework/allocator_registry.h"
21 #include "tensorflow/core/framework/tracking_allocator.h"
22 #include "tensorflow/core/lib/strings/strcat.h"
23 #include "tensorflow/core/lib/strings/stringprintf.h"
24 #include "tensorflow/core/platform/mem.h"
25 #include "tensorflow/core/platform/mutex.h"
26 #include "tensorflow/core/platform/types.h"
27 
28 namespace tensorflow {
29 
30 thread_local MemoryDebugAnnotation ScopedMemoryDebugAnnotation::annotation_;
31 
DebugString() const32 string AllocatorStats::DebugString() const {
33   return strings::Printf(
34       "Limit:            %20lld\n"
35       "InUse:            %20lld\n"
36       "MaxInUse:         %20lld\n"
37       "NumAllocs:        %20lld\n"
38       "MaxAllocSize:     %20lld\n"
39       "Reserved:         %20lld\n"
40       "PeakReserved:     %20lld\n"
41       "LargestFreeBlock: %20lld\n",
42       static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
43       static_cast<long long>(this->bytes_in_use),
44       static_cast<long long>(this->peak_bytes_in_use),
45       static_cast<long long>(this->num_allocs),
46       static_cast<long long>(this->largest_alloc_size),
47       static_cast<long long>(this->bytes_reserved),
48       static_cast<long long>(this->peak_bytes_reserved),
49       static_cast<long long>(this->largest_free_block_bytes));
50 }
51 
52 constexpr size_t Allocator::kAllocatorAlignment;
53 
~Allocator()54 Allocator::~Allocator() {}
55 
// If true, the CPU allocator collects full stats (see
// EnableCPUAllocatorFullStats below). File-local; toggled only by the
// accessor functions in this file.
static bool cpu_allocator_collect_full_stats = false;
58 
EnableCPUAllocatorFullStats()59 void EnableCPUAllocatorFullStats() { cpu_allocator_collect_full_stats = true; }
CPUAllocatorFullStatsEnabled()60 bool CPUAllocatorFullStatsEnabled() { return cpu_allocator_collect_full_stats; }
61 
DebugString() const62 string AllocatorAttributes::DebugString() const {
63   return strings::StrCat("AllocatorAttributes(on_host=", on_host(),
64                          " nic_compatible=", nic_compatible(),
65                          " gpu_compatible=", gpu_compatible(), ")");
66 }
67 
cpu_allocator_base()68 Allocator* cpu_allocator_base() {
69   static Allocator* cpu_alloc =
70       AllocatorFactoryRegistry::singleton()->GetAllocator();
71   // TODO(tucker): This really seems wrong.  It's only going to be effective on
72   // the first call in a process (but the desired effect is associated with a
73   // session), and we probably ought to be tracking the highest level Allocator,
74   // not the lowest.  Revisit the advertised semantics of the triggering option.
75   if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
76     cpu_alloc = new TrackingAllocator(cpu_alloc, true);
77   }
78   return cpu_alloc;
79 }
80 
cpu_allocator(int numa_node)81 Allocator* cpu_allocator(int numa_node) {
82   // Correctness relies on devices being created prior to the first call
83   // to cpu_allocator, if devices are ever to be created in the process.
84   // Device creation in turn triggers ProcessState creation and the availability
85   // of the correct access pointer via this function call.
86   static ProcessStateInterface* ps =
87       AllocatorFactoryRegistry::singleton()->process_state();
88   if (ps) {
89     return ps->GetCPUAllocator(numa_node);
90   } else {
91     return cpu_allocator_base();
92   }
93 }
94 
SubAllocator(const std::vector<Visitor> & alloc_visitors,const std::vector<Visitor> & free_visitors)95 SubAllocator::SubAllocator(const std::vector<Visitor>& alloc_visitors,
96                            const std::vector<Visitor>& free_visitors)
97     : alloc_visitors_(alloc_visitors), free_visitors_(free_visitors) {}
98 
VisitAlloc(void * ptr,int index,size_t num_bytes)99 void SubAllocator::VisitAlloc(void* ptr, int index, size_t num_bytes) {
100   for (const auto& v : alloc_visitors_) {
101     v(ptr, index, num_bytes);
102   }
103 }
104 
VisitFree(void * ptr,int index,size_t num_bytes)105 void SubAllocator::VisitFree(void* ptr, int index, size_t num_bytes) {
106   // Although we don't guarantee any order of visitor application, strive
107   // to apply free visitors in reverse order of alloc visitors.
108   for (int i = free_visitors_.size() - 1; i >= 0; --i) {
109     free_visitors_[i](ptr, index, num_bytes);
110   }
111 }
112 }  // namespace tensorflow
113