/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/base/metatrace.h"

#include "perfetto/base/compiler.h"
#include "perfetto/base/task_runner.h"
#include "perfetto/base/time.h"
#include "perfetto/ext/base/file_utils.h"

namespace perfetto {
namespace metatrace {

std::atomic<uint32_t> g_enabled_tags{0};
std::atomic<uint64_t> g_enabled_timestamp{0};

// static members
constexpr size_t RingBuffer::kCapacity;
std::array<Record, RingBuffer::kCapacity> RingBuffer::records_;
std::atomic<bool> RingBuffer::read_task_queued_;
std::atomic<uint64_t> RingBuffer::wr_index_;
std::atomic<uint64_t> RingBuffer::rd_index_;
std::atomic<bool> RingBuffer::has_overruns_;
Record RingBuffer::bankruptcy_record_;

constexpr uint16_t Record::kTypeMask;
constexpr uint16_t Record::kTypeCounter;
constexpr uint16_t Record::kTypeEvent;

namespace {

// std::function<> is not trivially de/constructible. This struct wraps it in a
// heap-allocated struct to avoid static initializers.
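// The instance returned by GetInstance() below is intentionally leaked (it is
// allocated once and never deleted), so no static destructor is registered at
// process exit either.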
struct Delegate {
  static Delegate* GetInstance() {
    static Delegate* instance = new Delegate();
    return instance;
  }

  base::TaskRunner* task_runner = nullptr;
  std::function<void()> read_task;
};

}  // namespace

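// Usage sketch (hypothetical caller, for illustration only): a client on the
// task runner's own thread could enable meta-tracing roughly as follows, where
// |tags| is a bitmask of the tag values declared in metatrace.h and the lambda
// is a placeholder read task:
//
//   bool enabled = metatrace::Enable(
//       [] { /* drain RingBuffer records here */ }, task_runner, tags);
//
// Enable() returns false (and changes nothing) if meta-tracing is already on.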
bool Enable(std::function<void()> read_task,
            base::TaskRunner* task_runner,
            uint32_t tags) {
  PERFETTO_DCHECK(read_task);
  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
  if (g_enabled_tags.load(std::memory_order_acquire))
    return false;

  Delegate* dg = Delegate::GetInstance();
  dg->task_runner = task_runner;
  dg->read_task = std::move(read_task);
  RingBuffer::Reset();
  g_enabled_timestamp.store(TraceTimeNowNs(), std::memory_order_relaxed);
  g_enabled_tags.store(tags, std::memory_order_release);
  return true;
}

void Disable() {
  g_enabled_tags.store(0, std::memory_order_release);
  Delegate* dg = Delegate::GetInstance();
  PERFETTO_DCHECK(!dg->task_runner ||
                  dg->task_runner->RunsTasksOnCurrentThread());
  dg->task_runner = nullptr;
  dg->read_task = nullptr;
}

// static
void RingBuffer::Reset() {
  bankruptcy_record_.clear();
  for (Record& record : records_)
    record.clear();
  wr_index_ = 0;
  rd_index_ = 0;
  has_overruns_ = false;
  read_task_queued_ = false;
}

// static
Record* RingBuffer::AppendNewRecord() {
  auto wr_index = wr_index_.fetch_add(1, std::memory_order_acq_rel);

  // rd_index can only monotonically increase; we don't care if we read an
  // older value, we'll just hit the slow-path a bit earlier if that happens.
  auto rd_index = rd_index_.load(std::memory_order_relaxed);

  PERFETTO_DCHECK(wr_index >= rd_index);
  auto size = wr_index - rd_index;
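  // Once the buffer is more than half full, fall through to the slow path
  // below, which posts the read task so the reader drains the buffer before
  // it fills up completely.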
  if (PERFETTO_LIKELY(size < kCapacity / 2))
    return At(wr_index);

  // Slow-path: Enqueue the read task and handle overruns.
  bool expected = false;
  if (RingBuffer::read_task_queued_.compare_exchange_strong(expected, true)) {
    Delegate* dg = Delegate::GetInstance();
    if (dg->task_runner) {
      dg->task_runner->PostTask([] {
        // Meta-tracing might have been disabled in the meantime.
        auto read_task = Delegate::GetInstance()->read_task;
        if (read_task)
          read_task();
        RingBuffer::read_task_queued_ = false;
      });
    }
  }

  if (PERFETTO_LIKELY(size < kCapacity))
    return At(wr_index);

  has_overruns_.store(true, std::memory_order_release);
  wr_index_.fetch_sub(1, std::memory_order_acq_rel);
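  // The fetch_sub gives back the slot reserved by the fetch_add above, keeping
  // wr_index_ - rd_index_ bounded by kCapacity; this writer is redirected to
  // the shared bankruptcy record instead.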

  // In the case of overflows, threads will race writing on the same memory
  // location and TSan will rightly complain. This is fine though because nobody
  // will read the bankruptcy record and it's designed to contain garbage.
  PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&bankruptcy_record_, sizeof(Record),
                                      "nothing reads bankruptcy_record_")
  return &bankruptcy_record_;
}

// static
bool RingBuffer::IsOnValidTaskRunner() {
  auto* task_runner = Delegate::GetInstance()->task_runner;
  return task_runner && task_runner->RunsTasksOnCurrentThread();
}

}  // namespace metatrace
}  // namespace perfetto