/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_PJRT_EVENT_POOL_H_
#define TENSORFLOW_COMPILER_XLA_PJRT_EVENT_POOL_H_

#include <memory>
#include <stack>

#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/stream_executor.h"

namespace xla {

class EventPool {
 public:
  class Handle {
   public:
    Handle() = default;
    ~Handle();

    Handle(const Handle&) = delete;
    Handle(Handle&&) = default;
    Handle& operator=(const Handle&) = delete;
    Handle& operator=(Handle&&) = default;

    // There is a total order on events handed out by the event pool. The most
    // useful aspect of this total order is that two events returned by
    // ThenAllocateAndRecordEvent on the same stream can be compared to see
    // which was recorded earlier on that stream.
    inline bool operator<(const Handle& rhs) const {
      return sequence_number_ < rhs.sequence_number_;
    }
    inline bool operator>(const Handle& rhs) const { return rhs < *this; }
    inline bool operator<=(const Handle& rhs) const { return !(*this > rhs); }
    inline bool operator>=(const Handle& rhs) const { return !(*this < rhs); }
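    //
    // Illustrative sketch (placeholder code, not part of this header): because
    // later-recorded handles compare greater, handles taken from a single
    // stream can be sorted into recording order, e.g.
    //
    //   std::vector<EventPool::Handle> handles = ...;  // all from one stream
    //   std::sort(handles.begin(), handles.end());     // earliest event first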

    se::Event* event() const { return event_.get(); }
    uint64 sequence_number() const { return sequence_number_; }

   private:
    friend class EventPool;

    EventPool* pool_ = nullptr;
    std::unique_ptr<se::Event> event_;
    uint64 sequence_number_;
  };

  // Initializes a new EventPool. If `allow_reuse` is true, then events will be
  // returned to the pool when their handles are deleted and made available to
  // subsequent allocations. Reuse only works on the GPU platform.
  explicit EventPool(bool allow_reuse);
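  //
  // For example (illustrative only): a pool used with the GPU platform would
  // typically be constructed as `EventPool pool(/*allow_reuse=*/true);`;
  // platforms that cannot reuse events would presumably pass `false`.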

  // Allocates a new (or reused) event from the pool, and records the event on
  // `stream`.
  //
  // Reuse is only possible on GPU. Event allocation and recording are coupled
  // in a single operation because on GPU it is recording an event that makes it
  // a "new" event. According to the CUDA documentation it is safe to call
  // cudaEventRecord even if that event may still be in use on the device; APIs
  // such as cudaStreamWaitEvent capture the state of the event at the time of
  // the host-side call and are not affected by a later host-side
  // cudaEventRecord.
  StatusOr<Handle> ThenAllocateAndRecordEvent(se::Stream* stream);
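  //
  // Illustrative usage sketch (names such as `pool`, `stream`, and
  // `consumer_stream` are placeholders, not part of this header):
  //
  //   TF_ASSIGN_OR_RETURN(EventPool::Handle handle,
  //                       pool.ThenAllocateAndRecordEvent(stream));
  //   consumer_stream->ThenWaitFor(handle.event());  // wait for the event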

  // Version of ThenAllocateAndRecordEvent split into two phases; this is
  // sometimes helpful if we want to avoid failures by preallocating events.
  StatusOr<Handle> AllocateEvent(se::StreamExecutor* executor);
  void ThenRecordEvent(se::Stream* stream, EventPool::Handle& handle);
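  //
  // Illustrative two-phase sketch (placeholder names, not part of this
  // header): preallocate the event where a failure is easy to handle, then
  // record it once the corresponding work has been enqueued on `stream`:
  //
  //   TF_ASSIGN_OR_RETURN(EventPool::Handle handle,
  //                       pool.AllocateEvent(stream->parent()));
  //   // ... enqueue work on `stream` ...
  //   pool.ThenRecordEvent(stream, handle);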

 private:
  const bool allow_reuse_;

  absl::Mutex mu_;
  std::stack<std::unique_ptr<se::Event>> free_events_ TF_GUARDED_BY(mu_);
  uint64 next_sequence_number_ TF_GUARDED_BY(mu_);
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_PJRT_EVENT_POOL_H_