/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file defines the tf_device dialect: it contains operations that model
// TensorFlow's actions to launch computations on accelerator devices.

#ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_DEVICE_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_DEVICE_H_

#include "mlir/IR/Builders.h"  // TF:llvm-project
#include "mlir/IR/Dialect.h"  // TF:llvm-project
#include "mlir/IR/OpDefinition.h"  // TF:llvm-project

namespace mlir {
namespace tf_device {

// The TensorFlow Device dialect.
//
// This dialect contains operations to describe/launch computations on devices.
// These operations do not map 1-1 to TensorFlow ops and require a lowering
// pass later to transform them into Compile/Run op pairs, like XlaCompile and
// XlaRun.
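//
// For illustration only (a hand-written sketch, not taken from the generated
// op documentation): a computation launched on a device might be written in
// this dialect roughly as below, in MLIR's generic op syntax. The concrete op
// set and its pretty-printed form come from the generated definitions included
// further down; "tf.SomeOp" is a placeholder.
//
//   %result = "tf_device.launch"() ({
//     %0 = "tf.SomeOp"(%arg) : (tensor<f32>) -> tensor<f32>
//     "tf_device.return"(%0) : (tensor<f32>) -> ()
//   }) {device = "/device:GPU:0"} : () -> tensor<f32>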
class TensorFlowDeviceDialect : public Dialect {
 public:
  // Constructs the TensorFlowDevice dialect for a non-null MLIRContext.
  explicit TensorFlowDeviceDialect(MLIRContext* context);
};

// Declares the operations for this dialect using the generated header.
#define GET_OP_CLASSES
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h.inc"

// TODO(b/148642767): Use tablegen to define tf_device.parallel_execute op once
// variadic regions can be expressed in tablegen.
//
// ParallelExecute op concurrently executes a variadic number of regions. The
// regions must represent separate sets of instructions that can execute
// concurrently; to represent concurrently executed regions with dependencies,
// use multiple ParallelExecute ops instead. As such, regions within a
// ParallelExecute op must not have control/data dependencies on one another.
// While explicit dependencies between regions are disallowed, ParallelExecute
// does not prevent implicit communication between regions (e.g. communication
// via send/recvs); in that case, users of the ParallelExecute op must provide
// correct control dependencies between regions to guarantee correctness.
// Regions in ParallelExecute may include Resource ops. When different regions
// include ops that access the same resource, the users of the ParallelExecute
// op must provide a mechanism (via send/recvs or via control dependencies) to
// guarantee correct ordering. Sequential ordering of ops within a region is
// guaranteed, as is sequential ordering of ops before/after the
// ParallelExecute op: execution of the regions inside a ParallelExecute op is
// blocked until all inputs to all regions are materialized, and ops following
// the ParallelExecute op are blocked until all regions have executed.
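//
// Example usage (an illustrative sketch only; `builder`, `loc`, and
// `output_types` are assumed to come from the surrounding pass, and each
// region body still has to be populated and terminated with a
// tf_device.return op):
//
//   auto parallel_execute = builder.create<ParallelExecuteOp>(
//       loc, /*num_regions=*/2, output_types);
//   Block& region0 = parallel_execute.getRegionWithIndex(0);
//   Block& region1 = parallel_execute.getRegionWithIndex(1);
//   // Once the regions are populated, the results produced by region i are
//   // available through parallel_execute.getRegionOutputs(i).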
class ParallelExecuteOp
    : public Op<ParallelExecuteOp,
                OpTrait::SingleBlockImplicitTerminator<ReturnOp>::Impl> {
 public:
  using Op::Op;

  static void build(Builder* builder, OperationState& state, int num_regions,
                    llvm::ArrayRef<Type> output_types);

  static StringRef getOperationName() { return "tf_device.parallel_execute"; }

  Operation::result_range getRegionOutputs(unsigned region_index);
  LogicalResult verify();
  Block& getRegionWithIndex(unsigned index);
};

}  // namespace tf_device
}  // namespace mlir

#endif  // TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_DEVICE_H_