/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PARTITIONING_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PARTITIONING_UTILS_H_

#include <memory>
#include <unordered_map>
#include <vector>

#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/lib/core/status.h"

namespace tensorflow {

// Given a `device_set` and a `graph`, partitions the `graph` into
// `subgraphs`. `subgraphs` maps device names to the graph assigned to that
// device. `graph` must have been placed (e.g. by running Placer),
// i.e. all nodes must have an assigned_device set.
// `graph` is non-const because the underlying Partition() function transforms
// the graph to correctly partition distributed control flow.
Status PartitionFunctionGraph(
    const DeviceSet& device_set, std::unique_ptr<Graph> graph,
    std::unordered_map<string, std::unique_ptr<Graph>>* subgraphs);

// This function performs bookkeeping to track which `Arg` and `Retval` nodes
// were placed on a particular device / graph.
//
// More specifically, this function
//
// (1) rewrites the indices of the `Arg` and `Retval` nodes in `graph` to be
//     consecutive.
//
//     These indices might not be consecutive after grappler's pruning
//     optimization (e.g. removing redundant Args) or after graph partitioning.
//     In the latter case, the nodes in `graph` are placed on `device_type`,
//     and each such graph partition gets a subset of the arguments and return
//     values. The `index` attributes of these _Arg and _Retval nodes reflect
//     the indices of these parameters in the original function. To convert
//     `subgraph` to a function, we need to replace their original indices with
//     0, 1, 2, ... .
//
//     The argument and return value order in `graph` is determined by the
//     argument and return value order in the original function. This stability
//     is important because it enables us to treat a single-partition function
//     as having the same signature as the subgraph.
//
// (2) records the subsets of `Arg` and `Retval` nodes assigned to the
//     device in `*_indices`, and
// (3) records which `Arg` and `Retval` nodes live in host memory in
//     `*_alloc_attrs`. If these vectors are NULL, this step is skipped.
Status UpdateArgAndRetvalMetadata(
    Graph* graph, const string& device_type,
    std::vector<FunctionArgIndex>* arg_indices, std::vector<int>* ret_indices,
    std::vector<AllocatorAttributes>* arg_alloc_attrs,
    std::vector<AllocatorAttributes>* ret_alloc_attrs);
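
// A rough usage sketch for the two functions above. The placed `graph`, the
// `device_set`, and the way the caller derives each partition's device type
// are assumptions of this example, not prescribed by this header; error
// handling is abbreviated:
//
//   std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
//   TF_RETURN_IF_ERROR(
//       PartitionFunctionGraph(device_set, std::move(graph), &subgraphs));
//   for (auto& pair : subgraphs) {
//     Graph* subgraph = pair.second.get();
//     const string device_type = ...;  // e.g. parsed from pair.first.
//     std::vector<FunctionArgIndex> arg_indices;
//     std::vector<int> ret_indices;
//     std::vector<AllocatorAttributes> arg_alloc_attrs;
//     std::vector<AllocatorAttributes> ret_alloc_attrs;
//     TF_RETURN_IF_ERROR(UpdateArgAndRetvalMetadata(
//         subgraph, device_type, &arg_indices, &ret_indices,
//         &arg_alloc_attrs, &ret_alloc_attrs));
//   }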

// Utility for generating function names not present in `flib_def`, using
// the given `name` as the base for the name.
class FunctionNameGenerator {
 public:
  // `flib_def` must outlive this.
  FunctionNameGenerator(const FunctionLibraryDefinition* flib_def,
                        const string& name)
      : flib_def_(flib_def), name_(name), counter_(0) {}

  // Returns a function name not present in `flib_def` using `name` as
  // the base and appending a numeric suffix.
  string GetName();

 private:
  const FunctionLibraryDefinition* flib_def_;
  const string name_;
  uint32 counter_;
};

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_COMMON_RUNTIME_PARTITIONING_UTILS_H_