/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LAYOUT_ASSIGNMENT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_LAYOUT_ASSIGNMENT_H_

#include <iosfwd>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/service/call_graph.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/shape_layout.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"

namespace xla {

// Abstract base class for layout constraints. These constraint objects are
// gathered together in a LayoutConstraints object.
class LayoutConstraint {
 public:
  LayoutConstraint(bool mandatory, bool dfs,
                   int64_t priority = kDefaultPriority)
      : mandatory_(mandatory), dfs_(dfs), priority_(priority) {}
  virtual ~LayoutConstraint() = default;

  virtual string ToString() const = 0;

  // True if this constraint cannot be overwritten by a different constraint.
  bool mandatory() const { return mandatory_; }

  // When true, the constraint propagates in DFS order; when false, it
  // propagates in BFS order.
  bool dfs() const { return dfs_; }

  // Returns the priority of the current constraint. When conflicting
  // constraints are encountered, the higher-priority one wins.
  int64_t priority() const { return priority_; }

  // The default priority of all constraints when not set explicitly.
  static constexpr int64_t kDefaultPriority = 1;

 private:
  bool mandatory_;
  bool dfs_;
  int64_t priority_;
};

std::ostream& operator<<(std::ostream& out, const LayoutConstraint& constraint);

// Layout constraint on a single LogicalBuffer. This constrains the layout of
// an array produced by a particular instruction.
class BufferLayoutConstraint : public LayoutConstraint {
 public:
  BufferLayoutConstraint(const Layout& layout, const LogicalBuffer& buffer,
                         bool mandatory, bool dfs,
                         int64_t priority = LayoutConstraint::kDefaultPriority);

  const LogicalBuffer& buffer() const { return *buffer_; }
  const Layout& layout() const { return layout_; }

  string ToString() const override;

 private:
  Layout layout_;
  const LogicalBuffer* buffer_;
};

// Constraint on the layout of the operand of an instruction. The constrained
// shape can be arbitrarily shaped (array or tuple). This is a constraint on
// the use of a shaped value and is not a hard constraint on the instruction(s)
// which define the value, as copies may be inserted between the definition and
// the use.
class OperandLayoutConstraint : public LayoutConstraint {
 public:
  OperandLayoutConstraint(
      const ShapeLayout& shape_layout, const HloInstruction* instruction,
      int64_t operand_no, bool mandatory, bool dfs,
      int64_t priority = LayoutConstraint::kDefaultPriority);

  const ShapeLayout& shape_layout() const { return shape_layout_; }
  const HloInstruction* instruction() const { return instruction_; }
  int64_t operand_no() const { return operand_no_; }
  const HloInstruction* operand() const {
    return instruction_->operand(operand_no_);
  }

  string ToString() const override;

 private:
  ShapeLayout shape_layout_;
  const HloInstruction* instruction_;
  int64_t operand_no_;
};

// Constraint on the layout of the result of the entry computation.
class ResultLayoutConstraint : public LayoutConstraint {
 public:
  explicit ResultLayoutConstraint(
      const ShapeLayout& shape_layout, bool dfs = false,
      int64_t priority = LayoutConstraint::kDefaultPriority)
      : LayoutConstraint(/*mandatory=*/true, dfs, priority),
        shape_layout_(shape_layout) {}

  const ShapeLayout& shape_layout() const { return shape_layout_; }
  string ToString() const override;

 private:
  const ShapeLayout shape_layout_;
};

// Class encapsulating the layout constraints of the values in an HLO
// computation.
class LayoutConstraints {
 public:
  LayoutConstraints(const TuplePointsToAnalysis& points_to_analysis,
                    HloComputation* computation);
  ~LayoutConstraints() = default;

  const HloComputation* computation() const { return computation_; }
  HloComputation* computation() { return computation_; }
  const TuplePointsToAnalysis& points_to_analysis() const {
    return points_to_analysis_;
  }

  // Returns a vector containing the constraints which have been added to the
  // LayoutConstraints object since the construction of the object or since
  // the last time ConsumeAddedConstraints() was called. This is used to
  // identify newly added constraints when propagating layouts.
  std::vector<const LayoutConstraint*> ConsumeAddedConstraints() {
    std::vector<const LayoutConstraint*> ret_vec(std::move(added_constraints_));
    added_constraints_.clear();
    return ret_vec;
  }
  void ClearAddedConstraints() { added_constraints_.clear(); }
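
  // Illustrative sketch, not part of the API: a propagation driver typically
  // drains newly added constraints until a fixed point is reached. The names
  // `constraints` (a LayoutConstraints*) and `PropagateOneConstraint` below
  // are hypothetical.
  //
  //   std::vector<const LayoutConstraint*> worklist =
  //       constraints->ConsumeAddedConstraints();
  //   while (!worklist.empty()) {
  //     for (const LayoutConstraint* constraint : worklist) {
  //       // Propagation may register new constraints; they are picked up on
  //       // the next call to ConsumeAddedConstraints().
  //       TF_RETURN_IF_ERROR(PropagateOneConstraint(constraint, constraints));
  //     }
  //     worklist = constraints->ConsumeAddedConstraints();
  //   }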

  // Returns the layout of a LogicalBuffer, the layout of the operand of the
  // instruction, or the layout of the result of the computation, respectively,
  // if it has been constrained. Otherwise returns nullptr.
  const Layout* BufferLayout(const LogicalBuffer& buffer) const;
  const BufferLayoutConstraint* GetBufferLayoutConstraint(
      const LogicalBuffer& buffer) const;
  const ShapeLayout* OperandLayout(const HloInstruction* instruction,
                                   int64_t operand_no) const;
  const OperandLayoutConstraint* GetOperandLayoutConstraint(
      const HloInstruction* instruction, int64_t operand_no) const;
  const ShapeLayout* ResultLayout() const;

  // Adds a constraint on the layout of a LogicalBuffer, the layout of the
  // operand of the instruction, or the layout of the result of the
  // computation, respectively.
  Status SetBufferLayout(const Layout& layout, const LogicalBuffer& buffer,
                         bool mandatory = true, bool dfs = true);
  Status SetOperandLayout(const Shape& shape_with_layout,
                          const HloInstruction* instruction, int64_t operand_no,
                          bool mandatory = true, bool dfs = true);
  Status SetResultLayout(const Shape& shape_with_layout, bool dfs = true);

  // Convenience wrapper around SetOperandLayout for setting the layout of an
  // operand using a Layout object. The operand must be array-shaped.
  Status SetArrayOperandLayout(const Layout& layout,
                               const HloInstruction* instruction,
                               int64_t operand_no, bool mandatory = true,
                               bool dfs = true);

  // Convenience wrapper around SetBufferLayout. Sets the layouts of all
  // buffers created by the instruction to the layouts in the given shape. The
  // instruction must define every logical buffer in its output.
  //
  // If `allow_alias` is false, the function will check that all output buffers
  // are defined by `instruction`, not aliased to an instruction elsewhere.
  Status SetInstructionLayout(const Shape& shape_with_layout,
                              const HloInstruction* instruction,
                              bool mandatory = true, bool dfs = true,
                              bool allow_alias = false);
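
  // Illustrative sketch, with hypothetical names: constraining the output and
  // one operand of a rank-2 instruction `dot` through a LayoutConstraints*
  // `constraints`. The minor-to-major orders are arbitrary example choices.
  //
  //   TF_RETURN_IF_ERROR(constraints->SetInstructionLayout(
  //       ShapeUtil::MakeShapeWithLayout(F32, {16, 32}, {1, 0}), dot));
  //   TF_RETURN_IF_ERROR(constraints->SetArrayOperandLayout(
  //       LayoutUtil::MakeLayout({0, 1}), dot, /*operand_no=*/0));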

  // Returns true if any buffer in the given operand is forwarded to the output
  // of the given instruction. For example, the Tuple instruction forwards the
  // buffers of its operands and would return true for each of its operands.
  bool AnyOperandBufferForwarded(const HloInstruction* instruction,
                                 int64_t operand_no) const;
  // Similar to above, but returns true only if all buffers associated with
  // that operand are forwarded.
  bool AllOperandBuffersForwarded(const HloInstruction* instruction,
                                  int64_t operand_no) const;

  // Returns the set of logical buffers (by LogicalBuffer::Id) which do not
  // yet have a layout constraint.
  const std::set<LogicalBuffer::Id>& unconstrained_buffer_ids() const {
    return unconstrained_buffer_ids_;
  }

  string ToString() const;

 private:
  // Finds a buffer set in the buffer-set cache. Caching is useful because the
  // flattened buffer set for the same instruction can otherwise be created
  // many times, which is often slow.
  PointsToSet::BufferSet* GetBufferSet(const HloInstruction* instruction) const;

  // The set of BufferLayoutConstraints applied to the computation.
  std::unordered_map<const LogicalBuffer*, BufferLayoutConstraint>
      buffer_constraints_;

  // The set of OperandLayoutConstraints applied to the computation.
  using OperandConstraintKey = std::pair<const HloInstruction*, int64_t>;
  std::map<OperandConstraintKey, OperandLayoutConstraint> operand_constraints_;

  // The result constraint for the computation (can be null).
  std::unique_ptr<ResultLayoutConstraint> result_constraint_;

  // A vector which holds constraints as they are added. Can be cleared with
  // ClearAddedConstraints.
  std::vector<const LayoutConstraint*> added_constraints_;

  // Points-to analysis for the module. Used to propagate constraints through
  // the HLO graph.
  const TuplePointsToAnalysis& points_to_analysis_;

  // Array-shaped buffers which have not yet been constrained.
  std::set<LogicalBuffer::Id> unconstrained_buffer_ids_;

  mutable absl::flat_hash_map<const HloInstruction*,
                              std::unique_ptr<PointsToSet::BufferSet>>
      buffer_sets_cache_;

  HloComputation* computation_;
};

// Contains constraints on the layout of channels (sends and recvs).
class ChannelLayoutConstraints {
 public:
  // Construct an empty constraint set.
  ChannelLayoutConstraints() {}

  // Returns true if channel_id has a layout constraint.
  bool IsChannelConstrained(int64_t channel_id) const {
    return constraints_.contains(channel_id);
  }

  // Given `shape`, apply the layout for `channel_id`. `channel_id` must
  // already be constrained.
  Shape LayoutShapeForChannel(Shape shape, int64_t channel_id) const {
    auto it = constraints_.find(channel_id);
    CHECK(it != constraints_.end()) << "Channel " << channel_id;
    *shape.mutable_layout() = it->second;
    return shape;
  }

  // Returns the layout constraint for `channel_id`, which must already be
  // constrained.
  const Layout& LayoutForChannel(int64_t channel_id) const {
    auto it = constraints_.find(channel_id);
    CHECK(it != constraints_.end()) << "Channel " << channel_id;
    return it->second;
  }

  // Adds a new layout constraint for `channel_id`. Returns nullptr if the
  // constraint was added, or if it matches a constraint which already exists;
  // otherwise returns the conflicting layout which was previously set for the
  // channel.
  const Layout* ConstrainChannel(int64_t channel_id, const Layout& layout) {
    auto it = constraints_.emplace(std::make_pair(channel_id, layout));
    if (it.second) {
      return nullptr;
    }
    return LayoutUtil::Equal(layout, it.first->second) ? nullptr
                                                       : &it.first->second;
  }
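
  // Illustrative sketch, with hypothetical values: constrain a channel once,
  // then stamp its layout onto a shape that travels over the same channel.
  //
  //   ChannelLayoutConstraints channel_constraints;
  //   const Layout* conflict = channel_constraints.ConstrainChannel(
  //       /*channel_id=*/42, LayoutUtil::MakeLayout({1, 0}));
  //   CHECK(conflict == nullptr);  // The first constraint always succeeds.
  //   if (channel_constraints.IsChannelConstrained(42)) {
  //     Shape laid_out = channel_constraints.LayoutShapeForChannel(
  //         ShapeUtil::MakeShape(F32, {8, 8}), /*channel_id=*/42);
  //   }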

 private:
  absl::flat_hash_map<int64_t, Layout> constraints_;
};

// HLO pass which assigns layouts to all instructions in the HLO module while
// satisfying all necessary invariants and minimizing cost.
class LayoutAssignment : public HloModulePass {
 public:
  // entry_computation_layout is modified to populate a layout for the result
  // in the case that no particular layout is requested.
  //
  // channel_constraints is both an input and an output. Any sends or recvs
  // that are present in channel_constraints will be laid out as constrained.
  // Any unconstrained sends or recvs will be laid out as locally optimal and
  // their layout will be added as a constraint to channel_constraints.
  //
  // If channel_constraints is nullptr, any module passed to `Run` must not
  // contain kSend or kRecv instructions.
  explicit LayoutAssignment(
      ComputationLayout* entry_computation_layout,
      ChannelLayoutConstraints* channel_constraints = nullptr,
      bool reverse_computation_order = false);
  ~LayoutAssignment() override {}
  absl::string_view name() const override { return "layout-assignment"; }

  // Assigns layouts to the given module. Returns whether the module was
  // changed (any layouts were changed).
  StatusOr<bool> Run(HloModule* module) override;
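
  // Illustrative sketch, with a hypothetical `module` (an HloModule*):
  // running the pass standalone; error handling is elided.
  //
  //   ComputationLayout* entry_layout =
  //       module->mutable_entry_computation_layout();
  //   LayoutAssignment layout_assignment(entry_layout);
  //   StatusOr<bool> changed = layout_assignment.Run(module);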

  // Determines whether an instruction can change layouts. An instruction not
  // being able to change layout means that it requires operands with the same
  // rank as the output to have the same layout as the output.
  static bool InstructionCanChangeLayout(const HloInstruction* instruction);

  // For an array shape, returns true iff the shape is at most rank 1. For a
  // tuple shape, returns true iff all leaf shapes are at most rank 1.
  static bool IsAtMostRank1(const Shape& shape);
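
  // Illustrative examples of IsAtMostRank1, assuming the usual ShapeUtil
  // helpers:
  //
  //   IsAtMostRank1(ShapeUtil::MakeShape(F32, {}));      // true: rank 0
  //   IsAtMostRank1(ShapeUtil::MakeShape(F32, {8}));     // true: rank 1
  //   IsAtMostRank1(ShapeUtil::MakeShape(F32, {2, 3}));  // false: rank 2
  //   IsAtMostRank1(ShapeUtil::MakeTupleShape(
  //       {ShapeUtil::MakeShape(F32, {4}),
  //        ShapeUtil::MakeShape(F32, {2, 2})}));         // false: rank-2 leaf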

 protected:
  // These methods, invoked by PropagateConstraints, propagate a layout
  // constraint to its neighbors (i.e. operands and users) in order to
  // minimize the cost of the instructions being constrained. New constraints
  // are added to the given constraint set.
  //
  // Backends can override these methods with backend-specific propagation
  // rules.
  virtual Status PropagateBufferConstraint(
      const BufferLayoutConstraint& buffer_constraint,
      LayoutConstraints* constraints);
  virtual Status PropagateOperandConstraint(
      const OperandLayoutConstraint& operand_constraint,
      LayoutConstraints* constraints);
  virtual Status PropagateResultConstraint(
      const ResultLayoutConstraint& layout_constraint,
      LayoutConstraints* constraints);

  virtual Layout GetUnconstrainedLayout(const LogicalBuffer& buffer) {
    return LayoutUtil::GetDefaultLayoutForShape(buffer.shape());
  }
  // Called after layouts of an instruction have been finalized to allow
  // subclasses to check for platform specific assumptions.
  virtual Status Verify(const HloInstruction* instruction) {
    return Status::OK();
  }
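
  // Illustrative sketch, with a hypothetical subclass: a backend can hook the
  // virtual methods above to inject its own defaults and checks.
  //
  //   class MyBackendLayoutAssignment : public LayoutAssignment {
  //    public:
  //     using LayoutAssignment::LayoutAssignment;
  //
  //    protected:
  //     Layout GetUnconstrainedLayout(const LogicalBuffer& buffer) override {
  //       // For example, fall back to the generic default layout; a real
  //       // backend might pick a layout matching its tiling requirements.
  //       return LayoutUtil::GetDefaultLayoutForShape(buffer.shape());
  //     }
  //   };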

  // Propagates a buffer layout constraint into the operands that use it.
  Status PropagateBufferConstraintToUses(
      const BufferLayoutConstraint& buffer_constraint,
      LayoutConstraints* constraints);

  // Propagates a layout constraint on the use of the result of the given
  // instruction to the definitions of the LogicalBuffers which make up the
  // result.
  Status PropagateUseConstraintToDefs(const ShapeLayout& shape_layout,
                                      const HloInstruction* instruction,
                                      LayoutConstraints* constraints);

  // Propagates the memory space defined in the entry computation to the called
  // computations.
  Status PropagateMemorySpace(HloModule* module);

  // Chooses a layout of operand `operand_no` of `instruction` that minimizes
  // the cost of `instruction`. `output_layout` is the layout of `instruction`.
  // Returns null if it can't decide the best layout.
  // Precondition: `instruction` and the operand are array-shaped.
  virtual std::unique_ptr<Layout> ChooseOperandLayoutFromOutputLayout(
      const Layout& output_layout, const HloInstruction* instruction,
      int64_t operand_no);
  // Given the layout of `user`'s `operand_no`-th operand, chooses a layout of
  // `user` that minimizes its cost on that operand.  Returns null if it can't
  // decide the best layout.
  // Precondition: `user` and the operand are array-shaped.
  virtual std::unique_ptr<Layout> ChooseOutputLayoutFromOperandLayout(
      const Layout& operand_layout, const HloInstruction* user,
      int64_t operand_no);

  // Convenient wrapper for InstructionCanChangeLayout which can be overridden
  // in subclasses.
  virtual bool InstructionCanChangeLayoutInstance(
      const HloInstruction* instruction);

 private:
  // Initializes the layout assignment object for a new Run() call.
  Status Init();

  // Adds constraints which must be satisfied for correctness on all
  // backends. Called once prior to propagating constraints.
  Status AddMandatoryConstraints(const ComputationLayout* computation_layout,
                                 ChannelLayoutConstraints* channel_constraints,
                                 HloComputation* computation,
                                 LayoutConstraints* constraints);

  // This method can be overridden to add backend-specific constraints to the
  // layout of the instructions of a computation. This method is called after
  // all mandatory constraints have been added via AddMandatoryConstraints,
  // and before propagating constraints.
  virtual Status AddBackendConstraints(LayoutConstraints* constraints) {
    return Status::OK();
  }
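
  // Illustrative sketch, with hypothetical opcode and layout choices: an
  // override forcing a row-major output layout on every rank-2 dot in the
  // computation. This is an example rule, not an actual backend's behavior.
  //
  //   Status AddBackendConstraints(LayoutConstraints* constraints) override {
  //     for (HloInstruction* instruction :
  //          constraints->computation()->instructions()) {
  //       if (instruction->opcode() == HloOpcode::kDot &&
  //           instruction->shape().rank() == 2) {
  //         TF_RETURN_IF_ERROR(constraints->SetInstructionLayout(
  //             ShapeUtil::MakeShapeWithLayout(
  //                 instruction->shape().element_type(),
  //                 instruction->shape().dimensions(), {1, 0}),
  //             instruction));
  //       }
  //     }
  //     return Status::OK();
  //   }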

  // Constructs constraints and assigns layouts to all instructions in the
  // computation satisfying the given ComputationLayout, if not nullptr.
  // Otherwise the ComputationLayout will be calculated by propagating the
  // computation instruction constraints.
  // Layout constraints are added, then propagated until all LogicalBuffers in
  // the computation are constrained.
  Status RunOnComputation(ComputationLayout* computation_layout,
                          HloComputation* computation,
                          ChannelLayoutConstraints* channel_constraints);

  // Assigns layouts to the instructions of a computation which satisfy the
  // given layout constraints. Copies may be added to satisfy the constraints.
  // The given LayoutConstraints must have layout constraints for every
  // logical buffer in the computation.
  Status AssignLayouts(const LayoutConstraints& constraints,
                       HloComputation* computation);

  // Propagates layout constraints from a set of initial constraints in order
  // to minimize the local cost of the computation. This propagation is *not*
  // required for correctness.
  Status PropagateConstraints(LayoutConstraints* constraints);

  Status PropagateBufferConstraintToOperands(
      const BufferLayoutConstraint& buffer_constraint,
      LayoutConstraints* constraints);

  // Checks that all layouts in the module have been set and satisfy all
  // necessary conditions.
  Status CheckLayouts(HloModule* module);

  // Computes the ComputationLayout of the given computation based on the
  // layouts assigned to its parameters and root instruction, and inserts it
  // into the computation_layouts_ map.
  Status CalculateComputationLayout(HloComputation* computation);

  // Clears all the layouts which can be cleared within a computation.
  Status ClearComputationLayouts(HloComputation* computation);

  // Clears the side effects of a previous pass, like added copy instructions.
  Status ClearPreviousPassSideEffects(HloModule* module);

  // Propagates the layouts computed by the layout assignment pass on the
  // given computation to the computation layout passed in to this API.
  // This API propagates missing layouts, and also checks that the layouts the
  // caller specified have been respected, by comparing them with the layouts
  // of the computation's parameters and root instruction.
  Status PropagateComputationLayouts(HloComputation* computation,
                                     ComputationLayout* computation_layout);

  // The pointer to the ComputationLayout passed as constructor parameter.
  ComputationLayout* entry_computation_layout_;

  // A copy of entry_computation_layout_ used to reset it to the initial
  // values during the multiple passes done by the layout assignment
  // operation.
  ComputationLayout saved_entry_computation_layout_;
  // If true, reverses the computation traversal order when assigning layouts.
  bool reverse_computation_order_;

 protected:
  // Sets up the copy instruction according to the characteristics (sharding,
  // metadata, ...) of the reference instruction. The index argument is used
  // when the instruction is a tuple, in which case it represents the location
  // from which the copy instruction was created.
  // If the index is empty, the whole sharding will be propagated, even if the
  // instruction has a tuple sharding.
  static void SetupCopiedInstruction(const HloInstruction& instruction,
                                     HloInstruction* copy,
                                     const ShapeIndex& index);

  // Creates and returns a copy of the given instruction with a different
  // layout. Tuple-shaped instructions will be deep-copied, and the last Tuple
  // instruction producing the copy is returned.
  StatusOr<HloInstruction*> CreateCopyWithNewLayout(
      const Shape& shape_with_layout, HloInstruction* instruction);

  // Creates a copy of the given operand if the operand's layout does not
  // match the given layout. This copy replaces the use in the given
  // instruction. Tuple operands will be deep-copied.
  virtual Status CopyOperandIfLayoutsDiffer(const ShapeLayout& operand_layout,
                                            HloInstruction* instruction,
                                            int64_t operand_no);

  // Registers a copy instruction added by the layout assignment pass.
  void RegisterAddedCopy(HloInstruction* copy) {
    CHECK_EQ(copy->opcode(), HloOpcode::kCopy);
    added_copies_.insert(copy);
  }

  // Adds a copy for the operand of an instruction, unless such operand is
  // already a copy and has a single user (which is necessarily the
  // instruction itself).
  Status AddCopyForOperand(HloInstruction* instruction, int64_t operand_number);

  // Applies the channel layout constraints by populating the
  // channel_constraints data structure passed in at constructor time. If
  // needed, adds copies in case the two ends of a channel ended up with
  // different layouts.
  Status ConstrainChannelLayouts(HloComputation* computation,
                                 ChannelLayoutConstraints* channel_constraints);

  // Resets the input ChannelLayoutConstraints to the original copy received
  // from the constructor input.
  void ResetChannelConstraints() {
    if (channel_layout_constraints_ != nullptr) {
      *channel_layout_constraints_ = channel_constraints_;
    }
  }

  // Adds constraints related to host Send/Recv instructions.
  Status BuildHostChannelConstraints(HloComputation* computation);

  // Map containing the layouts of all computations assigned so
  // far. Computations are handled in a topological sort where computations are
  // handled before their caller instructions so the layouts of caller
  // instructions can be set to match the computation.
  std::map<HloComputation*, ComputationLayout> computation_layouts_;

  // Map from branch computations to the result layout they should apply.
  std::map<HloComputation*, ComputationLayout> conditional_mismatch_;

  // Every copy added to the module by the layout assignment pass is registered
  // here.
  absl::flat_hash_set<HloInstruction*> added_copies_;

  // The pointer to the channel layout constraints passed in with the
  // constructor. If not nullptr, this is an input/output argument.
  ChannelLayoutConstraints* channel_layout_constraints_ = nullptr;

  // A copy of the input layout constraints used to reset the above pointer in
  // case we have to undo operations due to the multiple passes over the
  // computations/instructions.
  ChannelLayoutConstraints channel_constraints_;

  // Layout constraints for send/recv instructions which communicate with the
  // host.
  ChannelLayoutConstraints host_channel_constraints_;

  // Module points-to analysis, which can be updated for cloned computations.
  std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;

  // The set of HLO instructions which lacked any layout constraint, thus
  // receiving propagated default layouts.
  absl::flat_hash_set<const HloInstruction*> unconstrained_layout_instructions_;

  std::function<bool(const HloInstruction*)>
      instruction_can_change_layout_func_;

  // CallGraph of the module, used to track callsites of each computation.
  std::unique_ptr<CallGraph> call_graph_;
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_LAYOUT_ASSIGNMENT_H_