/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/framework/memory_types.h"

#include <algorithm>
#include <utility>

#include "absl/strings/str_join.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

namespace {
// Returns the largest endpoint of anything in the name_map.
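// E.g., for name_map {{"a", {0, 2}}, {"b", {2, 5}}} this returns 5, the
// total number of endpoints.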
int GetTotal(const NameRangeMap& name_map) {
  int total = 0;
  for (const auto& item : name_map) {
    total = std::max(total, item.second.second);
  }
  return total;
}

// Fills memory_types for either input or output, setting everything
// to DEVICE_MEMORY except those args in host_memory_args.  Removes
// elements of host_memory_args that were used.
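// E.g., with name_map {{"a", {0, 2}}} and host_memory_args {"a", "b"},
// memory_types[0] and memory_types[1] become HOST_MEMORY, and
// host_memory_args shrinks to {"b"} for the next pass.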
void MemoryTypesHelper(const NameRangeMap& name_map,
                       std::vector<string>* host_memory_args,
                       MemoryTypeVector* memory_types) {
  // Update args that have been marked as in "HOST_MEMORY".
  size_t keep = 0;
  for (size_t i = 0; i < host_memory_args->size(); ++i) {
    auto iter = name_map.find((*host_memory_args)[i]);
    if (iter != name_map.end()) {
      for (int j = iter->second.first; j < iter->second.second; ++j) {
        (*memory_types)[j] = HOST_MEMORY;
      }
    } else {
      // (*host_memory_args)[i] not found, save it for the next pass.
      if (i > keep) (*host_memory_args)[keep] = (*host_memory_args)[i];
      ++keep;
    }
  }
  host_memory_args->resize(keep);
}

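// Returns true if op_type names an op that invokes a function; memory types
// for such ops are derived from their data types rather than from a
// KernelDef.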
bool IsFunctionCallOp(const string& op_type) {
  return op_type == "SymbolicGradient" || op_type == "PartitionedCall" ||
         op_type == "StatefulPartitionedCall" || op_type == "While";
}

}  // namespace

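// Returns the default memory type for `dtype`: host memory for int32 and for
// types that must always live on the host (e.g. string and resource types),
// device memory otherwise.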
MemoryType MTypeFromDType(const DataType dtype) {
  return (dtype == DT_INT32 || DataTypeAlwaysOnHost(dtype)) ? HOST_MEMORY
                                                            : DEVICE_MEMORY;
}

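// Like MTypeFromDType, but keeps int32 tensors in device memory; used below
// for TPU devices and XLA-compiled functions.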
MemoryType MTypeFromDTypeIntsOnDevice(const DataType dtype) {
  return DataTypeAlwaysOnHost(dtype) ? HOST_MEMORY : DEVICE_MEMORY;
}

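// Computes the memory type (host or device) of each input and output
// endpoint of `ndef` when placed on `device_type`.
//
// Illustrative usage (a sketch; assumes `ndef` describes an op registered in
// the global registry):
//
//   MemoryTypeVector inp_mtypes, out_mtypes;
//   TF_RETURN_IF_ERROR(MemoryTypesForNode(OpRegistry::Global(),
//                                         DeviceType(DEVICE_GPU), ndef,
//                                         &inp_mtypes, &out_mtypes));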
Status MemoryTypesForNode(const OpRegistryInterface* op_registry,
                          const DeviceType& device_type, const NodeDef& ndef,
                          MemoryTypeVector* inp_mtypes,
                          MemoryTypeVector* out_mtypes) {
  // Look up the Op registered for this op name.
  const OpDef* op_def;
  TF_RETURN_IF_ERROR(op_registry->LookUpOpDef(ndef.op(), &op_def));

  // Look up the Kernel registered for this node def.
  const KernelDef* kdef = nullptr;
  Status status =
      FindKernelDef(device_type, ndef, &kdef, nullptr /* kernel_class_name */);

  DataTypeVector inp_dtypes;
  DataTypeVector out_dtypes;
  TF_RETURN_IF_ERROR(
      InOutTypesForNode(ndef, *op_def, &inp_dtypes, &out_dtypes));

  inp_mtypes->clear();
  out_mtypes->clear();

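  // Whether the node is explicitly marked for XLA compilation via the
  // kXlaMustCompileAttr attribute.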
  bool has_xla_compile = [&] {
    const auto& it = ndef.attr().find(kXlaMustCompileAttr);
    return it != ndef.attr().end() && it->second.b();
  }();

  // For functions (which have no KernelDef) and their gradients, we can only
  // derive the memory types from the data types on a best-effort basis. For
  // now, we assume int32 is always in host memory and other types are always
  // in device memory.
  // TODO(zhifengc,phawkins): We should do type inference over function bodies
  // to derive the correct input/output memory types. We should also split
  // host-memory and non-host-memory arguments into separate type lists.
  if (!status.ok() || IsFunctionCallOp(ndef.op())) {
    if (device_type.type_string() == "TPU" || has_xla_compile) {
      // Here we assume that if tf.function() is called within
      // "with tf.device('/device:TPU:0')", the whole function will be
      // compiled and executed on TPU. This is true today, but once we
      // implement auto clustering on function bodies, it will no longer hold.
      // For example, we might want to place string arguments in host memory.
      for (const auto& t : inp_dtypes)
        inp_mtypes->push_back(MTypeFromDTypeIntsOnDevice(t));
      for (const auto& t : out_dtypes)
        out_mtypes->push_back(MTypeFromDTypeIntsOnDevice(t));
    } else {
      for (const auto& t : inp_dtypes) inp_mtypes->push_back(MTypeFromDType(t));
      for (const auto& t : out_dtypes) out_mtypes->push_back(MTypeFromDType(t));
    }
    return Status::OK();
  }

  // Gets the input/output names and their corresponding endpoint ranges.
  NameRangeMap inp_names;
  NameRangeMap out_names;
  TF_RETURN_IF_ERROR(NameRangesForNode(ndef, *op_def, &inp_names, &out_names));

  // Now that we know the size, fill with the default 'DEVICE_MEMORY'.
  inp_mtypes->resize(GetTotal(inp_names), DEVICE_MEMORY);
  out_mtypes->resize(GetTotal(out_names), DEVICE_MEMORY);

  // Fills in host memory types based on the kernel def.
  const auto& from_proto = kdef->host_memory_arg();
  std::vector<string> host_memory_args(from_proto.begin(), from_proto.end());
  MemoryTypesHelper(inp_names, &host_memory_args, inp_mtypes);
  MemoryTypesHelper(out_names, &host_memory_args, out_mtypes);
  if (!host_memory_args.empty()) {
    return errors::InvalidArgument(
        "HostMemory args '", absl::StrJoin(host_memory_args, "', '"),
        "' not found in OpDef: ", SummarizeOpDef(*op_def));
  }
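  // Sanity check: the name ranges can never cover more endpoints than there
  // are declared dtypes.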
  CHECK_LE(inp_mtypes->size(), inp_dtypes.size());
  CHECK_LE(out_mtypes->size(), out_dtypes.size());

  // Mark e.g. all resource and string types as host memory.
  for (int i = 0; i < inp_mtypes->size(); ++i) {
    if (DataTypeAlwaysOnHost(inp_dtypes[i])) {
      (*inp_mtypes)[i] = HOST_MEMORY;
    }
  }
  for (int i = 0; i < out_mtypes->size(); ++i) {
    if (DataTypeAlwaysOnHost(out_dtypes[i])) {
      (*out_mtypes)[i] = HOST_MEMORY;
    }
  }

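  // Honor explicit per-node overrides: the "_input_hostmem" and
  // "_output_hostmem" attrs list endpoint indices whose memory type is
  // forced to HOST_MEMORY.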
  std::vector<int32> hostmem_attr;
  if (TryGetNodeAttr(ndef, "_input_hostmem", &hostmem_attr)) {
    for (int32 i : hostmem_attr) {
      if (0 <= i && i < inp_mtypes->size()) {
        (*inp_mtypes)[i] = HOST_MEMORY;
      }
    }
  }
  if (TryGetNodeAttr(ndef, "_output_hostmem", &hostmem_attr)) {
    for (int32 i : hostmem_attr) {
      if (0 <= i && i < out_mtypes->size()) {
        (*out_mtypes)[i] = HOST_MEMORY;
      }
    }
  }

  return Status::OK();
}

}  // namespace tensorflow