/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TFRT_UTILS_GRAPH_PARTITION_H_
#define TENSORFLOW_CORE_TFRT_UTILS_GRAPH_PARTITION_H_

#include <memory>
#include <string>
#include <vector>

#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {

// Inserts send/recv ops into `graph` if its nodes are assigned to multiple
// devices. Specifically, the nodes placed on each device are wrapped in a
// function and invoked by a PartitionedCall op. All PartitionedCall ops are
// connected to a StatefulPartitionedCall op (which behaves as a 'stateful
// IdentityN') to protect them from being pruned in the subsequent MLIR
// lowering passes (b/232026253).
//
// The following shows a simple example of using this function.
//
// The original graph has four nodes that are placed on different devices:
//
//        ----->  op1(host)  ------
//       /                         \
//   input(host)               output(host)
//       \                         /
//        -----> op2(device) ------
//
// Calling this function returns the following graph, where `op1` is wrapped
// in the function invoked by `PartitionedCall_1`, and `op2` is wrapped in the
// function invoked by `PartitionedCall_2`. Both have a data dependency on the
// `StatefulPartitionedCall` op:
//
//   input ---> PartitionedCall_1 ----
//                                    \
//                         StatefulPartitionedCall ---> output
//                                    /
//              PartitionedCall_2 ----
//
StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
    const std::string& graph_func_name, const DeviceSet& device_set,
    const Device* host_device, const std::vector<std::string>& inputs,
    const std::vector<std::string>& outputs,
    const std::vector<std::string>& control_outputs,
    std::unique_ptr<Graph> graph);
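//
// A minimal usage sketch (an illustration, not part of the original API
// documentation): it assumes the caller has already built a placed `graph`
// and obtained `device_set` and `host_device` elsewhere; `BuildPlacedGraph`
// is a hypothetical helper. `TF_ASSIGN_OR_RETURN` is TensorFlow's
// StatusOr-unwrapping macro.
//
//   std::unique_ptr<Graph> graph = BuildPlacedGraph();
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<Graph> partitioned_graph,
//       InsertTransferOps(/*graph_func_name=*/"partitioned_func", device_set,
//                         host_device, /*inputs=*/{"input"},
//                         /*outputs=*/{"output"}, /*control_outputs=*/{},
//                         std::move(graph)));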

}  // namespace tfrt_stub
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_TFRT_UTILS_GRAPH_PARTITION_H_