/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/framework/allocator.h"

#include <atomic>

#include "tensorflow/core/framework/allocator_registry.h"
#include "tensorflow/core/framework/tracking_allocator.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

#ifdef TENSORFLOW_MEM_DEBUG
thread_local const char* pending_op_name = nullptr;
thread_local uint64 pending_step_id = 0;
#endif

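// Renders the collected statistics as an aligned, human-readable multi-line
// string; an unset bytes_limit is reported as 0.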
string AllocatorStats::DebugString() const {
  return strings::Printf(
      "Limit:        %20lld\n"
      "InUse:        %20lld\n"
      "MaxInUse:     %20lld\n"
      "NumAllocs:    %20lld\n"
      "MaxAllocSize: %20lld\n",
      static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
      static_cast<long long>(this->bytes_in_use),
      static_cast<long long>(this->peak_bytes_in_use),
      static_cast<long long>(this->num_allocs),
      static_cast<long long>(this->largest_alloc_size));
}

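// Out-of-line definition for the static constexpr member declared in
// allocator.h (needed for ODR-use prior to C++17).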
constexpr size_t Allocator::kAllocatorAlignment;

Allocator::~Allocator() {}

// If true, cpu allocator collects full stats.
static bool cpu_allocator_collect_full_stats = false;

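// When enabled, cpu_allocator_base() wraps the registered CPU allocator in a
// TrackingAllocator so that per-allocation sizes and full stats are recorded.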
void EnableCPUAllocatorFullStats(bool enable) {
  cpu_allocator_collect_full_stats = enable;
}
bool CPUAllocatorFullStatsEnabled() { return cpu_allocator_collect_full_stats; }

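// Summarizes the placement and compatibility flags for logging and debugging.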
string AllocatorAttributes::DebugString() const {
  return strings::StrCat("AllocatorAttributes(on_host=", on_host(),
                         " nic_compatible=", nic_compatible(),
                         " gpu_compatible=", gpu_compatible(), ")");
}

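// Returns the process-wide base CPU allocator obtained from the
// AllocatorFactoryRegistry, wrapping it in a TrackingAllocator on first use
// if full-stats collection was requested.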
Allocator* cpu_allocator_base() {
  static Allocator* cpu_alloc =
      AllocatorFactoryRegistry::singleton()->GetAllocator();
  // TODO(tucker): This really seems wrong.  It's only going to be effective on
  // the first call in a process (but the desired effect is associated with a
  // session), and we probably ought to be tracking the highest level Allocator,
  // not the lowest.  Revisit the advertised semantics of the triggering option.
  if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
    cpu_alloc = new TrackingAllocator(cpu_alloc, true);
  }
  return cpu_alloc;
}

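// Returns the CPU allocator to use for `numa_node`. If a ProcessState has
// been registered (i.e. devices were created), it supplies a NUMA-aware
// allocator; otherwise this falls back to cpu_allocator_base().
//
// Typical use (sketch; assumes port::kNUMANoAffinity from platform/numa.h):
//   Allocator* a = cpu_allocator(port::kNUMANoAffinity);
//   void* p = a->AllocateRaw(Allocator::kAllocatorAlignment, num_bytes);
//   ...
//   a->DeallocateRaw(p);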
Allocator* cpu_allocator(int numa_node) {
  // Correctness relies on devices being created prior to the first call
  // to cpu_allocator, if devices are ever to be created in the process.
  // Device creation in turn triggers ProcessState creation and the availability
  // of the correct access pointer via this function call.
  static ProcessStateInterface* ps =
      AllocatorFactoryRegistry::singleton()->process_state();
  if (ps) {
    return ps->GetCPUAllocator(numa_node);
  } else {
    return cpu_allocator_base();
  }
}

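// A SubAllocator hands out large regions of memory on behalf of a
// higher-level allocator and notifies the registered visitors on every
// underlying allocation and free.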
SubAllocator::SubAllocator(const std::vector<Visitor>& alloc_visitors,
                           const std::vector<Visitor>& free_visitors)
    : alloc_visitors_(alloc_visitors), free_visitors_(free_visitors) {}

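// Applies each alloc visitor, in registration order, to the newly obtained
// region.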
void SubAllocator::VisitAlloc(void* ptr, int index, size_t num_bytes) {
  for (const auto& v : alloc_visitors_) {
    v(ptr, index, num_bytes);
  }
}

void SubAllocator::VisitFree(void* ptr, int index, size_t num_bytes) {
  // Although we don't guarantee any order of visitor application, strive
  // to apply free visitors in reverse order of alloc visitors.
  for (int i = free_visitors_.size() - 1; i >= 0; --i) {
    free_visitors_[i](ptr, index, num_bytes);
  }
}
}  // namespace tensorflow