/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/base/metatrace.h"

#include "perfetto/base/compiler.h"
#include "perfetto/base/task_runner.h"
#include "perfetto/base/time.h"
#include "perfetto/ext/base/file_utils.h"
#include "perfetto/ext/base/thread_annotations.h"

namespace perfetto {
namespace metatrace {

std::atomic<uint32_t> g_enabled_tags{0};
std::atomic<uint64_t> g_enabled_timestamp{0};

// static members
constexpr size_t RingBuffer::kCapacity;
std::array<Record, RingBuffer::kCapacity> RingBuffer::records_;
std::atomic<bool> RingBuffer::read_task_queued_;
std::atomic<uint64_t> RingBuffer::wr_index_;
std::atomic<uint64_t> RingBuffer::rd_index_;
std::atomic<bool> RingBuffer::has_overruns_;
Record RingBuffer::bankruptcy_record_;

constexpr uint16_t Record::kTypeMask;
constexpr uint16_t Record::kTypeCounter;
constexpr uint16_t Record::kTypeEvent;
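// (The out-of-line definitions above are needed because the static constexpr
// members are ODR-used; C++17 would make them implicitly inline.)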

namespace {

// std::function<> is not trivially de/constructible. This struct wraps it and
// is heap-allocated lazily (see GetInstance()) to avoid static initializers.
struct Delegate {
  static Delegate* GetInstance() {
    static Delegate* instance = new Delegate();
    return instance;
  }

  base::TaskRunner* task_runner = nullptr;
  std::function<void()> read_task;
};
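
// Note: the Delegate instance above is created lazily on first use and
// intentionally leaked, so neither a static initializer nor an exit-time
// destructor runs for the std::function member.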

}  // namespace

bool Enable(std::function<void()> read_task,
            base::TaskRunner* task_runner,
            uint32_t tags) {
  PERFETTO_DCHECK(read_task);
  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
  if (g_enabled_tags.load(std::memory_order_acquire))
    return false;

  Delegate* dg = Delegate::GetInstance();
  dg->task_runner = task_runner;
  dg->read_task = std::move(read_task);
  RingBuffer::Reset();
  g_enabled_timestamp.store(TraceTimeNowNs(), std::memory_order_relaxed);
  g_enabled_tags.store(tags, std::memory_order_release);
  return true;
}
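
// Usage sketch (illustrative; the tag mask value and the read-task body are
// placeholders): a client running on |task_runner| enables meta-tracing, gets
// |read_task| invoked when the ring buffer needs draining, and later disables
// tracing from the same thread.
//
//   metatrace::Enable(
//       [] { /* drain RingBuffer and serialize its records */ },
//       task_runner, /*tags=*/0xffffffff);
//   ...
//   metatrace::Disable();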

void Disable() {
  g_enabled_tags.store(0, std::memory_order_release);
  Delegate* dg = Delegate::GetInstance();
  PERFETTO_DCHECK(!dg->task_runner ||
                  dg->task_runner->RunsTasksOnCurrentThread());
  dg->task_runner = nullptr;
  dg->read_task = nullptr;
}

// static
void RingBuffer::Reset() {
  bankruptcy_record_.clear();
  for (Record& record : records_)
    record.clear();
  wr_index_ = 0;
  rd_index_ = 0;
  has_overruns_ = false;
  read_task_queued_ = false;
}

// static
Record* RingBuffer::AppendNewRecord() {
  auto wr_index = wr_index_.fetch_add(1, std::memory_order_acq_rel);

  // rd_index can only monotonically increase; we don't care if we read an
  // older value, we'll just hit the slow-path a bit earlier if it happens.
  auto rd_index = rd_index_.load(std::memory_order_relaxed);

  PERFETTO_DCHECK(wr_index >= rd_index);
  auto size = wr_index - rd_index;
  if (PERFETTO_LIKELY(size < kCapacity / 2))
    return At(wr_index);

  // Slow-path: Enqueue the read task and handle overruns.
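  // (The read task is posted once the buffer is half full, not completely
  // full, so the reader gets a head start to drain records before writers
  // overrun the ring.)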
  bool expected = false;
  if (RingBuffer::read_task_queued_.compare_exchange_strong(expected, true)) {
    Delegate* dg = Delegate::GetInstance();
    if (dg->task_runner) {
      dg->task_runner->PostTask([] {
        // Meta-tracing might have been disabled in the meantime.
        auto read_task = Delegate::GetInstance()->read_task;
        if (read_task)
          read_task();
        RingBuffer::read_task_queued_ = false;
      });
    }
  }

  if (PERFETTO_LIKELY(size < kCapacity))
    return At(wr_index);

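  // Full buffer: undo the index increment taken above so wr_index_ doesn't
  // keep running ahead of rd_index_, flag the overrun and hand out the shared
  // bankruptcy record instead of a slot in the ring.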
  has_overruns_.store(true, std::memory_order_release);
  wr_index_.fetch_sub(1, std::memory_order_acq_rel);

  // In the case of overflows, threads will race writing on the same memory
  // location and TSan will rightly complain. This is fine though because
  // nobody will read the bankruptcy record and it's designed to contain
  // garbage.
  PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&bankruptcy_record_, sizeof(Record),
                                      "nothing reads bankruptcy_record_")
  return &bankruptcy_record_;
}
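
// Writer-side sketch (illustrative; |kMyTag| is a placeholder and the Record
// fields, declared in metatrace.h, are not shown):
//
//   if (g_enabled_tags.load(std::memory_order_relaxed) & kMyTag) {
//     Record* record = RingBuffer::AppendNewRecord();
//     // Fill |record| in place. On overrun it points at the bankruptcy
//     // record, whose contents are never read.
//   }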

// static
bool RingBuffer::IsOnValidTaskRunner() {
  auto* task_runner = Delegate::GetInstance()->task_runner;
  return task_runner && task_runner->RunsTasksOnCurrentThread();
}

}  // namespace metatrace
}  // namespace perfetto