/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef EXPERIMENTAL_BRAIN_TPU_1VM_MINIEXECUTOR_TPU_MESH_STATE_INTERFACE_H_
#define EXPERIMENTAL_BRAIN_TPU_1VM_MINIEXECUTOR_TPU_MESH_STATE_INTERFACE_H_

#include <string>

#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/tpu_api.h"
#include "tensorflow/core/tpu/tpu_ops_c_api.h"

namespace tensorflow {

class TpuMeshCommonState;

namespace tpu {

const char kTpuMeshStateInterfaceResourceName[] = "tpu_mesh_common_state";

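// Owns an XLA_TpuMeshState handle obtained from the TPU ops C API and exposes
// it as a ResourceBase, so a single mesh state can be stored in a ResourceMgr
// under kTpuMeshStateInterfaceResourceName and shared across ops.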
class TpuMeshStateInterface : public tensorflow::ResourceBase {
 public:
  explicit TpuMeshStateInterface(XLA_TpuMeshState* handle)
      : mesh_state_(handle) {}

  ~TpuMeshStateInterface() override {
    if (mesh_state_ != nullptr) {
      OpsApiFn()->TpuMeshState_FreeFn(mesh_state_);
    }
  }

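  // Creates a fresh mesh state through the TPU ops C API. The caller owns the
  // returned reference and typically transfers it to a ResourceMgr (see the
  // usage sketch after the class).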
  static TpuMeshStateInterface* Create() {
    return new TpuMeshStateInterface(OpsApiFn()->TpuMeshState_CreateFn());
  }

  const XLA_TpuMeshState* data() const { return mesh_state_; }

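  // Returns the platform-specific TpuMeshCommonState that the TPU ops C API
  // associates with this mesh state.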
  tensorflow::TpuMeshCommonState* mesh_common_state() const {
    return static_cast<tensorflow::TpuMeshCommonState*>(
        OpsApiFn()->TpuMeshState_MeshCommonStateFn(mesh_state_));
  }

  // Returns whether the device assignment should be included as a static
  // field in the TPU program. This also determines whether the device
  // assignment becomes part of the compilation cache key.
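  // For example (illustrative numbers, not from this header): with
  // num_replicas = 8 and num_cores_per_replica = 2, the computation occupies
  // 16 cores; if the mesh reports 16 available cores of the requested core
  // type, the counts match and the device assignment is treated as static.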
  bool NeedsStaticDeviceAssignment(const TPUCompileMetadataProto& metadata,
                                   TpuCoreTypeEnum tpu_core_type) const {
    // A static device assignment enables XLA to perform certain optimizations
    // when all cores are used by the replicated computation.
    return metadata.num_cores_per_replica() * metadata.num_replicas() ==
           OpsApiFn()->TpuTopology_AvailableCoreCountFn(mesh_state_,
                                                        tpu_core_type);
  }

  string DebugString() const override { return "TpuMeshStateInterface"; }

 private:
  XLA_TpuMeshState* mesh_state_;
};
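
// Example usage (a sketch; `ctx` is assumed to be an OpKernelContext* and the
// default container is an illustrative choice). The mesh state is created
// once and shared through the ResourceMgr:
//
//   auto* rm = ctx->resource_manager();
//   auto* mesh_state = tpu::TpuMeshStateInterface::Create();
//   TF_RETURN_IF_ERROR(rm->Create(rm->default_container(),
//                                 tpu::kTpuMeshStateInterfaceResourceName,
//                                 mesh_state));
//
// ResourceMgr::Lookup takes an extra reference that the caller must release:
//
//   tpu::TpuMeshStateInterface* found = nullptr;
//   TF_RETURN_IF_ERROR(rm->Lookup(rm->default_container(),
//                                 tpu::kTpuMeshStateInterfaceResourceName,
//                                 &found));
//   core::ScopedUnref unref(found);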

}  // namespace tpu
}  // namespace tensorflow

#endif  // EXPERIMENTAL_BRAIN_TPU_1VM_MINIEXECUTOR_TPU_MESH_STATE_INTERFACE_H_