/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_EXECUTOR_MGR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_EXECUTOR_MGR_H_

#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"

namespace tensorflow {
class ConfigProto;
class DeviceMgr;
class NcclManager;

// Owns and dispenses per-step CollectiveExecutor instances, keyed by step_id.
class CollectiveExecutorMgr : public CollectiveExecutorMgrInterface {
 public:
  CollectiveExecutorMgr(
      const ConfigProto& config, const DeviceMgr* dev_mgr,
      std::unique_ptr<DeviceResolverInterface> dev_resolver,
      std::unique_ptr<ParamResolverInterface> param_resolver,
      std::unique_ptr<NcclCommunicatorInterface> nccl_communicator);

  virtual ~CollectiveExecutorMgr();

  // Returns the CollectiveExecutor for step_id, creating one via Create() if
  // no table entry exists yet.
  CollectiveExecutor* FindOrCreate(int64 step_id) override;

  // Removes the CollectiveExecutor for step_id from the table and releases it.
  void Cleanup(int64 step_id) override;

  ParamResolverInterface* GetParamResolver() const override {
    return param_resolver_.get();
  }

  DeviceResolverInterface* GetDeviceResolver() const override {
    return dev_resolver_.get();
  }

  NcclCommunicatorInterface* GetNcclCommunicator() const override {
    return nccl_communicator_.get();
  }

  void GetStepSequenceAsync(const GetStepSequenceRequest* request,
                            GetStepSequenceResponse* response,
                            const StatusCallback& done) override;

  void RefreshStepIdSequenceAsync(int64 graph_key,
                                  const StatusCallback& done) override;

  // Step id sequences are not managed by this class; always returns
  // CollectiveExecutor::kInvalidId.
  int64 NextStepId(int64 graph_key) override {
    return CollectiveExecutor::kInvalidId;
  }

  // No-op; step id sequences are not managed by this class.
  void RetireStepId(int64 graph_key, int64 step_id) override {}

 protected:
  // Called by FindOrCreate when table entry does not yet exist.
  virtual CollectiveExecutor* Create(int64 step_id);

  const DeviceMgr* dev_mgr_;
  std::unique_ptr<DeviceResolverInterface> dev_resolver_;
  std::unique_ptr<ParamResolverInterface> param_resolver_;
  string gpu_ring_order_;
  std::unique_ptr<NcclCommunicatorInterface> nccl_communicator_;
  // Unbounded work queue for scheduling potentially-blocking work during
  // collective op execution. Ownership is shared between `this` and
  // `CollectiveRemoteAccessLocal`.
  std::shared_ptr<UnboundedWorkQueue> work_queue_;

 private:
  mutex exec_mu_;
  // Map from step_id to CollectiveExecutor.
  gtl::FlatMap<int64, CollectiveExecutor*> executor_table_
      TF_GUARDED_BY(exec_mu_);
};

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_EXECUTOR_MGR_H_
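
// Usage sketch (illustrative only; not part of the original header). Assumes
// the caller holds a CollectiveExecutorMgr `cem` and a per-step id `step_id`.
// Whether the returned executor must be Unref()'d by the caller is an
// assumption based on CollectiveExecutor being reference counted; see
// collective_executor_mgr.cc for the actual contract.
//
//   CollectiveExecutor* ce = cem->FindOrCreate(step_id);
//   // ... run collective ops for this step through `ce` ...
//   ce->Unref();            // drop the caller's reference (assumed contract)
//   cem->Cleanup(step_id);  // retire the executor once the step completes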