1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #include "tensorflow/core/common_runtime/process_state.h"
17
18 #include <cstring>
19 #include <vector>
20
21 #include "tensorflow/core/common_runtime/bfc_allocator.h"
22 #include "tensorflow/core/common_runtime/pool_allocator.h"
23 #include "tensorflow/core/framework/allocator.h"
24 #include "tensorflow/core/framework/log_memory.h"
25 #include "tensorflow/core/framework/tracking_allocator.h"
26 #include "tensorflow/core/lib/gtl/stl_util.h"
27 #include "tensorflow/core/lib/strings/strcat.h"
28 #include "tensorflow/core/platform/logging.h"
29 #include "tensorflow/core/platform/mutex.h"
30 #include "tensorflow/core/platform/types.h"
31 #include "tensorflow/core/util/env_var.h"
32
33 namespace tensorflow {
34
singleton()35 /*static*/ ProcessState* ProcessState::singleton() {
36 static ProcessState* instance = new ProcessState;
37 static std::once_flag f;
38 std::call_once(f, []() {
39 AllocatorFactoryRegistry::singleton()->process_state_ = instance;
40 });
41
42 return instance;
43 }
44
// NUMA awareness starts disabled; while numa_enabled_ is false,
// GetCPUAllocator maps every requested NUMA node onto node 0.
ProcessState::ProcessState() : numa_enabled_(false) {}
46
DebugString()47 string ProcessState::MemDesc::DebugString() {
48 return strings::StrCat((loc == CPU ? "CPU " : "GPU "), dev_index,
49 ", dma: ", gpu_registered, ", nic: ", nic_registered);
50 }
51
PtrType(const void * ptr)52 ProcessState::MemDesc ProcessState::PtrType(const void* ptr) {
53 if (FLAGS_brain_gpu_record_mem_types) {
54 auto iter = mem_desc_map_.find(ptr);
55 if (iter != mem_desc_map_.end()) {
56 return iter->second;
57 }
58 }
59 return MemDesc();
60 }
61
// Returns the CPU allocator for `numa_node`, lazily creating allocators for
// that node (and any lower-numbered nodes) on first use.  Selection logic:
//   * BFCAllocator if env var TF_CPU_ALLOCATOR_USE_BFC is true (its default
//     is true whenever alloc/free visitors have been registered),
//   * PoolAllocator if a SubAllocator is needed (NUMA enabled or visitors
//     registered) but BFC was not selected,
//   * otherwise the process-wide default from cpu_allocator_base().
Allocator* ProcessState::GetCPUAllocator(int numa_node) {
  // Without NUMA support, every request is served by the node-0 allocator.
  if (!numa_enabled_ || numa_node == port::kNUMANoAffinity) numa_node = 0;
  mutex_lock lock(mu_);
  while (cpu_allocators_.size() <= static_cast<size_t>(numa_node)) {
    // If visitors have been defined we need an Allocator built from
    // a SubAllocator. Prefer BFCAllocator, but fall back to PoolAllocator
    // depending on env var setting.
    const bool alloc_visitors_defined =
        (!cpu_alloc_visitors_.empty() || !cpu_free_visitors_.empty());
    bool use_bfc_allocator = false;
    Status status = ReadBoolFromEnvVar(
        "TF_CPU_ALLOCATOR_USE_BFC", alloc_visitors_defined, &use_bfc_allocator);
    if (!status.ok()) {
      // Non-fatal: fall through with use_bfc_allocator as set so far.
      LOG(ERROR) << "GetCPUAllocator: " << status.error_message();
    }
    Allocator* allocator = nullptr;
    // A SubAllocator is only needed when the allocator must be NUMA-aware,
    // run the registered visitors, or back a BFCAllocator.
    SubAllocator* sub_allocator =
        (numa_enabled_ || alloc_visitors_defined || use_bfc_allocator)
            ? new BasicCPUAllocator(
                  numa_enabled_ ? numa_node : port::kNUMANoAffinity,
                  cpu_alloc_visitors_, cpu_free_visitors_)
            : nullptr;
    if (use_bfc_allocator) {
      // TODO(reedwm): evaluate whether 64GB by default is the best choice.
      int64 cpu_mem_limit_in_mb = -1;
      Status status = ReadInt64FromEnvVar("TF_CPU_BFC_MEM_LIMIT_IN_MB",
                                          1LL << 16 /*64GB max by default*/,
                                          &cpu_mem_limit_in_mb);
      if (!status.ok()) {
        LOG(ERROR) << "GetCPUAllocator: " << status.error_message();
      }
      // Convert the MB-denominated env value to bytes.
      int64 cpu_mem_limit = cpu_mem_limit_in_mb * (1LL << 20);
      DCHECK(sub_allocator);
      allocator =
          new BFCAllocator(sub_allocator, cpu_mem_limit, true /*allow_growth*/,
                           "bfc_cpu_allocator_for_gpu" /*name*/);
      VLOG(2) << "Using BFCAllocator with memory limit of "
              << cpu_mem_limit_in_mb << " MB for ProcessState CPU allocator";
    } else if (sub_allocator) {
      DCHECK(sub_allocator);
      allocator =
          new PoolAllocator(100 /*pool_size_limit*/, true /*auto_resize*/,
                            sub_allocator, new NoopRounder, "cpu_pool");
      VLOG(2) << "Using PoolAllocator for ProcessState CPU allocator "
              << "numa_enabled_=" << numa_enabled_
              << " numa_node=" << numa_node;
    } else {
      DCHECK(!sub_allocator);
      allocator = cpu_allocator_base();
    }
    if (LogMemory::IsEnabled() && !allocator->TracksAllocationSizes()) {
      // Wrap the allocator to track allocation ids for better logging
      // at the cost of performance.
      allocator = new TrackingAllocator(allocator, true);
    }
    cpu_allocators_.push_back(allocator);
    // With no SubAllocator, registered visitors cannot be honored; assert
    // that none exist in that case.
    if (!sub_allocator) {
      DCHECK(cpu_alloc_visitors_.empty() && cpu_free_visitors_.empty());
    }
  }
  return cpu_allocators_[numa_node];
}
124
AddCPUAllocVisitor(SubAllocator::Visitor visitor)125 void ProcessState::AddCPUAllocVisitor(SubAllocator::Visitor visitor) {
126 VLOG(1) << "AddCPUAllocVisitor";
127 mutex_lock lock(mu_);
128 CHECK_EQ(0, cpu_allocators_.size()) // Crash OK
129 << "AddCPUAllocVisitor must be called prior to first call to "
130 "ProcessState::GetCPUAllocator";
131 cpu_alloc_visitors_.push_back(std::move(visitor));
132 }
133
AddCPUFreeVisitor(SubAllocator::Visitor visitor)134 void ProcessState::AddCPUFreeVisitor(SubAllocator::Visitor visitor) {
135 mutex_lock lock(mu_);
136 CHECK_EQ(0, cpu_allocators_.size()) // Crash OK
137 << "AddCPUFreeVisitor must be called prior to first call to "
138 "ProcessState::GetCPUAllocator";
139 cpu_free_visitors_.push_back(std::move(visitor));
140 }
141
// Test-only: frees every allocator this ProcessState created and clears the
// pointer->MemDesc map, returning the object to its pristine state.
void ProcessState::TestOnlyReset() {
  mutex_lock lock(mu_);
  // Don't delete this value because it's static.
  Allocator* default_cpu_allocator = cpu_allocator_base();
  mem_desc_map_.clear();
  // Entries may be the shared static default allocator (when no SubAllocator
  // was needed); only the ones we own are deleted.
  for (Allocator* a : cpu_allocators_) {
    if (a != default_cpu_allocator) delete a;
  }
  cpu_allocators_.clear();
  // NOTE(review): cpu_al_ presumably holds additional owned allocator
  // wrappers; its declaration is not visible here -- confirm in the header.
  gtl::STLDeleteElements(&cpu_al_);
}
153
154 } // namespace tensorflow
155