/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_EXECUTABLE_RUN_OPTIONS_H_
#define TENSORFLOW_COMPILER_XLA_EXECUTABLE_RUN_OPTIONS_H_

// These classes are forward declared so that ExecutableRunOptions can be
// linked into an XLA-compiled binary without having to link all of the
// pointed-to objects (e.g., for an ahead-of-time compiled CPU binary, the gpu
// tools don't need to be linked).
namespace stream_executor {
class Stream;
class Platform;
}  // namespace stream_executor

namespace Eigen {
struct ThreadPoolDevice;
}  // namespace Eigen

namespace xla {

class DeviceMemoryAllocator;
class DeviceAssignment;
class ExecutionProfile;

// Class containing options for running a LocalExecutable.
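//
// Illustrative usage sketch (documentation only; `allocator` and `stream`
// stand for a caller-provided DeviceMemoryAllocator* and
// stream_executor::Stream*). The setters return a reference to the options
// object, so calls can be chained:
//
//   xla::ExecutableRunOptions run_options;
//   run_options.set_allocator(allocator)
//       .set_device_ordinal(0)
//       .set_stream(stream)
//       .set_rng_seed(42);
//
// The populated options are typically passed to LocalExecutable::Run().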
class ExecutableRunOptions {
 public:
  // Specifies the allocator to use during execution.
  ExecutableRunOptions& set_allocator(DeviceMemoryAllocator* allocator);
  DeviceMemoryAllocator* allocator() const;

  // If set, this is the device to run the computation on. Valid
  // device_ordinal values range from 0 to the number of devices minus 1, and
  // are identical to the device ordinal values used by StreamExecutor. The
  // device must be of the same type as the executable was compiled for. A
  // value of -1 indicates this option has not been set.
  ExecutableRunOptions& set_device_ordinal(int device_ordinal);
  int device_ordinal() const;

  // If set, this is the stream to run the computation on. The platform of the
  // stream must match the platform the executable was built for. A value of
  // nullptr indicates the option has not been set.
  ExecutableRunOptions& set_stream(stream_executor::Stream* stream);
  stream_executor::Stream* stream() const;

  // If set, this is the stream to perform any pre-computation transfers on.
  // The platform of the stream must match the platform the executable was
  // built for. A value of nullptr indicates the option has not been set.
  ExecutableRunOptions& set_host_to_device_stream(
      stream_executor::Stream* stream);
  stream_executor::Stream* host_to_device_stream() const;

  // Sets the thread pool device on which to run Eigen subcomputations.
  // Does not take ownership.
  ExecutableRunOptions& set_intra_op_thread_pool(
      const Eigen::ThreadPoolDevice* intra_op_thread_pool);
  const Eigen::ThreadPoolDevice* intra_op_thread_pool() const;

  // If set, profiling information is written to 'profile'.
  ExecutionProfile* execution_profile() const;
  ExecutableRunOptions& set_execution_profile(ExecutionProfile* profile);

  ExecutableRunOptions& set_device_assignment(
      const DeviceAssignment* device_assignment);
  const DeviceAssignment* device_assignment() const;

  ExecutableRunOptions& set_rng_seed(int rng_seed);
  int rng_seed() const;

 private:
  DeviceMemoryAllocator* allocator_ = nullptr;
  int device_ordinal_ = -1;
  const DeviceAssignment* device_assignment_ = nullptr;
  stream_executor::Stream* stream_ = nullptr;
  const Eigen::ThreadPoolDevice* intra_op_thread_pool_ = nullptr;
  ExecutionProfile* execution_profile_ = nullptr;
  int rng_seed_ = 0;
  stream_executor::Stream* host_to_device_stream_ = nullptr;
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_EXECUTABLE_RUN_OPTIONS_H_