1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "src/tracing/core/tracing_service_impl.h"
18 
19 #include "perfetto/base/build_config.h"
20 #include "perfetto/tracing/core/forward_decls.h"
21 
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <limits.h>
25 #include <string.h>
26 #include <regex>
27 #include <unordered_set>
28 
29 #if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
30     !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
31 #include <sys/uio.h>
32 #include <sys/utsname.h>
33 #include <unistd.h>
34 #endif
35 
36 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
37 #include <sys/system_properties.h>
38 #if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
39 #include "src/android_internal/lazy_library_loader.h"    // nogncheck
40 #include "src/android_internal/tracing_service_proxy.h"  // nogncheck
41 #endif  // PERFETTO_ANDROID_BUILD
42 #endif  // PERFETTO_OS_ANDROID
43 
44 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
45     PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
46     PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
47 #define PERFETTO_HAS_CHMOD
48 #include <sys/stat.h>
49 #endif
50 
51 #include <algorithm>
52 
53 #include "perfetto/base/build_config.h"
54 #include "perfetto/base/status.h"
55 #include "perfetto/base/task_runner.h"
56 #include "perfetto/ext/base/file_utils.h"
57 #include "perfetto/ext/base/metatrace.h"
58 #include "perfetto/ext/base/string_utils.h"
59 #include "perfetto/ext/base/temp_file.h"
60 #include "perfetto/ext/base/utils.h"
61 #include "perfetto/ext/base/version.h"
62 #include "perfetto/ext/base/watchdog.h"
63 #include "perfetto/ext/tracing/core/basic_types.h"
64 #include "perfetto/ext/tracing/core/consumer.h"
65 #include "perfetto/ext/tracing/core/observable_events.h"
66 #include "perfetto/ext/tracing/core/producer.h"
67 #include "perfetto/ext/tracing/core/shared_memory.h"
68 #include "perfetto/ext/tracing/core/shared_memory_abi.h"
69 #include "perfetto/ext/tracing/core/trace_packet.h"
70 #include "perfetto/ext/tracing/core/trace_writer.h"
71 #include "perfetto/protozero/scattered_heap_buffer.h"
72 #include "perfetto/protozero/static_buffer.h"
73 #include "perfetto/tracing/core/data_source_descriptor.h"
74 #include "perfetto/tracing/core/tracing_service_capabilities.h"
75 #include "perfetto/tracing/core/tracing_service_state.h"
76 #include "src/android_stats/statsd_logging_helper.h"
77 #include "src/protozero/filtering/message_filter.h"
78 #include "src/tracing/core/packet_stream_validator.h"
79 #include "src/tracing/core/shared_memory_arbiter_impl.h"
80 #include "src/tracing/core/trace_buffer.h"
81 
82 #include "protos/perfetto/common/builtin_clock.gen.h"
83 #include "protos/perfetto/common/builtin_clock.pbzero.h"
84 #include "protos/perfetto/common/trace_stats.pbzero.h"
85 #include "protos/perfetto/config/trace_config.pbzero.h"
86 #include "protos/perfetto/trace/clock_snapshot.pbzero.h"
87 #include "protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h"
88 #include "protos/perfetto/trace/system_info.pbzero.h"
89 #include "protos/perfetto/trace/trace_packet.pbzero.h"
90 #include "protos/perfetto/trace/trigger.pbzero.h"
91 
92 // General note: this class must assume that Producers are malicious and will
93 // try to crash / exploit this class. We can trust pointers because they come
94 // from the IPC layer, but we should never assume that the producer calls
95 // come in the right order or their arguments are sane / within bounds.
96 
97 // This is a macro because we want the call-site line number for the ELOG.
98 #define PERFETTO_SVC_ERR(...) \
99   (PERFETTO_ELOG(__VA_ARGS__), ::perfetto::base::ErrStatus(__VA_ARGS__))
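// Usage sketch (illustrative call, not from this file): logs the message via
// PERFETTO_ELOG at the call site and returns a base::Status carrying the same
// text, e.g.:
//   return PERFETTO_SVC_ERR("Invalid shared memory size %zu", size);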
100 
101 namespace perfetto {
102 
103 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
104     PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
105 // These are the only SELinux-approved dirs for trace files that are created
106 // directly by traced.
107 const char* kTraceDirBasePath = "/data/misc/perfetto-traces/";
108 const char* kAndroidProductionBugreportTracePath =
109     "/data/misc/perfetto-traces/bugreport/systrace.pftrace";
110 #endif
111 
112 namespace {
113 constexpr int kMaxBuffersPerConsumer = 128;
114 constexpr uint32_t kDefaultSnapshotsIntervalMs = 10 * 1000;
115 constexpr int kDefaultWriteIntoFilePeriodMs = 5000;
116 constexpr int kMaxConcurrentTracingSessions = 15;
117 constexpr int kMaxConcurrentTracingSessionsPerUid = 5;
118 constexpr int kMaxConcurrentTracingSessionsForStatsdUid = 10;
119 constexpr int64_t kMinSecondsBetweenTracesGuardrail = 5 * 60;
120 
121 constexpr uint32_t kMillisPerHour = 3600000;
122 constexpr uint32_t kMillisPerDay = kMillisPerHour * 24;
123 constexpr uint32_t kMaxTracingDurationMillis = 7 * 24 * kMillisPerHour;
124 
125 // These apply only if enable_extra_guardrails is true.
126 constexpr uint32_t kGuardrailsMaxTracingBufferSizeKb = 128 * 1024;
127 constexpr uint32_t kGuardrailsMaxTracingDurationMillis = 24 * kMillisPerHour;
128 
129 #if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
130 struct iovec {
131   void* iov_base;  // Address
132   size_t iov_len;  // Block size
133 };
134 
135 // Simple implementation of writev. Note that this does not give the atomicity
136 // guarantees of a real writev, but we don't depend on these (we aren't writing
137 // to the same file from another thread).
138 ssize_t writev(int fd, const struct iovec* iov, int iovcnt) {
139   ssize_t total_size = 0;
140   for (int i = 0; i < iovcnt; ++i) {
141     ssize_t current_size = base::WriteAll(fd, iov[i].iov_base, iov[i].iov_len);
142     if (current_size != static_cast<ssize_t>(iov[i].iov_len))
143       return -1;
144     total_size += current_size;
145   }
146   return total_size;
147 }
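// Illustrative behaviour of the shim above (hypothetical sizes): given two
// iovecs of 4 and 8 bytes, it returns 12 once both chunks are fully written,
// or -1 as soon as any chunk is short-written.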
148 
149 #define IOV_MAX 1024  // Linux compatible limit.
150 
151 #endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
152         // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
153 
154 // Partially encodes a CommitDataRequest in an int32 for the purposes of
155 // metatracing. Note that it encodes only the bottom 10 bits of the producer id
156 // (which is technically 16 bits wide).
157 //
158 // Format (by bit range):
159 // [   31 ][         30 ][             29:20 ][            19:10 ][        9:0]
160 // [unused][has flush id][num chunks to patch][num chunks to move][producer id]
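// Worked example (hypothetical values): producer_id=3, 2 chunks to move,
// 1 chunk to patch, flush_request_id set:
//   (1 << 30) | (1 << 20) | (2 << 10) | 3 = 0x40100803.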
161 static int32_t EncodeCommitDataRequest(ProducerID producer_id,
162                                        const CommitDataRequest& req_untrusted) {
163   uint32_t cmov = static_cast<uint32_t>(req_untrusted.chunks_to_move_size());
164   uint32_t cpatch = static_cast<uint32_t>(req_untrusted.chunks_to_patch_size());
165   uint32_t has_flush_id = req_untrusted.flush_request_id() != 0;
166 
167   uint32_t mask = (1 << 10) - 1;
168   uint32_t acc = 0;
169   acc |= has_flush_id << 30;
170   acc |= (cpatch & mask) << 20;
171   acc |= (cmov & mask) << 10;
172   acc |= (producer_id & mask);
173   return static_cast<int32_t>(acc);
174 }
175 
176 void SerializeAndAppendPacket(std::vector<TracePacket>* packets,
177                               std::vector<uint8_t> packet) {
178   Slice slice = Slice::Allocate(packet.size());
179   memcpy(slice.own_data(), packet.data(), packet.size());
180   packets->emplace_back();
181   packets->back().AddSlice(std::move(slice));
182 }
183 
184 std::tuple<size_t /*shm_size*/, size_t /*page_size*/> EnsureValidShmSizes(
185     size_t shm_size,
186     size_t page_size) {
187   // Theoretically the max page size supported by the ABI is 64KB.
188   // However, the current implementation of TraceBuffer (the non-shared
189   // userspace buffer where the service copies data) supports at most
190   // 32K. Setting 64K "works" from the producer<>consumer viewpoint
191   // but then causes the data to be discarded when copying it into
192   // TraceBuffer.
193   constexpr size_t kMaxPageSize = 32 * 1024;
194   static_assert(kMaxPageSize <= SharedMemoryABI::kMaxPageSize, "");
195 
196   if (page_size == 0)
197     page_size = TracingServiceImpl::kDefaultShmPageSize;
198   if (shm_size == 0)
199     shm_size = TracingServiceImpl::kDefaultShmSize;
200 
201   page_size = std::min<size_t>(page_size, kMaxPageSize);
202   shm_size = std::min<size_t>(shm_size, TracingServiceImpl::kMaxShmSize);
203 
204   // The tracing page size has to be a multiple of 4K. On some systems (e.g. Mac
205   // on Arm64) the system page size can be larger (e.g., 16K). That doesn't
206   // matter here, because the tracing page size is just a logical partitioning
207   // and does not have any dependencies on kernel mm syscalls (read: it's fine
208   // to have trace page sizes of 4K on a system where the kernel page size is
209   // 16K).
210   bool page_size_is_valid = page_size >= SharedMemoryABI::kMinPageSize;
211   page_size_is_valid &= page_size % SharedMemoryABI::kMinPageSize == 0;
212 
213   // Only allow power of two numbers of pages, i.e. 1, 2, 4, 8 pages.
214   size_t num_pages = page_size / SharedMemoryABI::kMinPageSize;
215   page_size_is_valid &= (num_pages & (num_pages - 1)) == 0;
216 
217   if (!page_size_is_valid || shm_size < page_size ||
218       shm_size % page_size != 0) {
219     return std::make_tuple(TracingServiceImpl::kDefaultShmSize,
220                            TracingServiceImpl::kDefaultShmPageSize);
221   }
222   return std::make_tuple(shm_size, page_size);
223 }
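// Illustrative outcomes (assuming the requested sizes are within kMaxShmSize):
//   (0, 0)        -> the service defaults.
//   (1 MB, 16 KB) -> accepted unchanged (16 KB is a power-of-two multiple of
//                    the 4 KB minimum page size and divides 1 MB).
//   (1 MB, 12 KB) -> falls back to the defaults (3 pages is not a power of two).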
224 
225 bool NameMatchesFilter(const std::string& name,
226                        const std::vector<std::string>& name_filter,
227                        const std::vector<std::string>& name_regex_filter) {
228   bool filter_is_set = !name_filter.empty() || !name_regex_filter.empty();
229   if (!filter_is_set)
230     return true;
231   bool filter_matches = std::find(name_filter.begin(), name_filter.end(),
232                                   name) != name_filter.end();
233   bool filter_regex_matches =
234       std::find_if(name_regex_filter.begin(), name_regex_filter.end(),
235                    [&](const std::string& regex) {
236                      return std::regex_match(
237                          name, std::regex(regex, std::regex::extended));
238                    }) != name_regex_filter.end();
239   return filter_matches || filter_regex_matches;
240 }
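// E.g. (hypothetical filters): the name "traced_probes" passes if name_filter
// contains the exact string "traced_probes" or if name_regex_filter contains a
// POSIX-extended regex such as "traced.*"; with both filters empty, every name
// passes.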
241 
242 // Used when:
243 // 1. TraceConfig.write_into_file == true and output_path is not empty.
244 // 2. Calling SaveTraceForBugreport(), from perfetto --save-for-bugreport.
245 base::ScopedFile CreateTraceFile(const std::string& path, bool overwrite) {
246 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
247     PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
248   // This is NOT trying to preserve any security property; SELinux does that.
249   // It just improves the actionability of the error when people try to save the
250   // trace in a location that is not SELinux-allowed (a generic "permission
251   // denied" vs "don't put it here, put it there").
252   if (!base::StartsWith(path, kTraceDirBasePath)) {
253     PERFETTO_ELOG("Invalid output_path %s. On Android it must be within %s.",
254                   path.c_str(), kTraceDirBasePath);
255     return base::ScopedFile();
256   }
257 #endif
258   // O_CREAT | O_EXCL will fail if the file exists already.
259   const int flags = O_RDWR | O_CREAT | (overwrite ? O_TRUNC : O_EXCL);
260   auto fd = base::OpenFile(path, flags, 0600);
261   if (fd) {
262 #if defined(PERFETTO_HAS_CHMOD)
263     // Passing 0644 directly above won't work because of umask.
264     PERFETTO_CHECK(fchmod(*fd, 0644) == 0);
265 #endif
266   } else {
267     PERFETTO_PLOG("Failed to create %s", path.c_str());
268   }
269   return fd;
270 }
271 
272 std::string GetBugreportTmpPath() {
273   return GetBugreportPath() + ".tmp";
274 }
275 
276 bool ShouldLogEvent(const TraceConfig& cfg) {
277   switch (cfg.statsd_logging()) {
278     case TraceConfig::STATSD_LOGGING_ENABLED:
279       return true;
280     case TraceConfig::STATSD_LOGGING_DISABLED:
281       return false;
282     case TraceConfig::STATSD_LOGGING_UNSPECIFIED:
283       // For backward compatibility with older versions of perfetto_cmd.
284       return cfg.enable_extra_guardrails();
285   }
286   PERFETTO_FATAL("For GCC");
287 }
288 
289 }  // namespace
290 
291 // These constants are instead defined in the header because they are used by tests.
292 constexpr size_t TracingServiceImpl::kDefaultShmSize;
293 constexpr size_t TracingServiceImpl::kDefaultShmPageSize;
294 
295 constexpr size_t TracingServiceImpl::kMaxShmSize;
296 constexpr uint32_t TracingServiceImpl::kDataSourceStopTimeoutMs;
297 constexpr uint8_t TracingServiceImpl::kSyncMarker[];
298 
299 std::string GetBugreportPath() {
300 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
301     PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
302   return kAndroidProductionBugreportTracePath;
303 #else
304   // Only for tests; SaveTraceForBugreport is not used on other OSes.
305   return base::GetSysTempDir() + "/bugreport.pftrace";
306 #endif
307 }
308 
309 // static
310 std::unique_ptr<TracingService> TracingService::CreateInstance(
311     std::unique_ptr<SharedMemory::Factory> shm_factory,
312     base::TaskRunner* task_runner) {
313   return std::unique_ptr<TracingService>(
314       new TracingServiceImpl(std::move(shm_factory), task_runner));
315 }
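// Typical bring-up from an embedder (sketch; the factory is whatever
// SharedMemory::Factory implementation the host platform provides):
//   auto svc = TracingService::CreateInstance(std::move(shm_factory),
//                                             task_runner);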
316 
317 TracingServiceImpl::TracingServiceImpl(
318     std::unique_ptr<SharedMemory::Factory> shm_factory,
319     base::TaskRunner* task_runner)
320     : task_runner_(task_runner),
321       shm_factory_(std::move(shm_factory)),
322       uid_(base::GetCurrentUserId()),
323       buffer_ids_(kMaxTraceBufferID),
324       trigger_probability_rand_(
325           static_cast<uint32_t>(base::GetWallTimeNs().count())),
326       weak_ptr_factory_(this) {
327   PERFETTO_DCHECK(task_runner_);
328 }
329 
330 TracingServiceImpl::~TracingServiceImpl() {
331   // TODO(fmayer): handle teardown of all Producers.
332 }
333 
334 std::unique_ptr<TracingService::ProducerEndpoint>
335 TracingServiceImpl::ConnectProducer(Producer* producer,
336                                     uid_t uid,
337                                     const std::string& producer_name,
338                                     size_t shared_memory_size_hint_bytes,
339                                     bool in_process,
340                                     ProducerSMBScrapingMode smb_scraping_mode,
341                                     size_t shared_memory_page_size_hint_bytes,
342                                     std::unique_ptr<SharedMemory> shm,
343                                     const std::string& sdk_version) {
344   PERFETTO_DCHECK_THREAD(thread_checker_);
345 
346   if (lockdown_mode_ && uid != base::GetCurrentUserId()) {
347     PERFETTO_DLOG("Lockdown mode. Rejecting producer with UID %ld",
348                   static_cast<unsigned long>(uid));
349     return nullptr;
350   }
351 
352   if (producers_.size() >= kMaxProducerID) {
353     PERFETTO_DFATAL("Too many producers.");
354     return nullptr;
355   }
356   const ProducerID id = GetNextProducerID();
357   PERFETTO_DLOG("Producer %" PRIu16 " connected", id);
358 
359   bool smb_scraping_enabled = smb_scraping_enabled_;
360   switch (smb_scraping_mode) {
361     case ProducerSMBScrapingMode::kDefault:
362       break;
363     case ProducerSMBScrapingMode::kEnabled:
364       smb_scraping_enabled = true;
365       break;
366     case ProducerSMBScrapingMode::kDisabled:
367       smb_scraping_enabled = false;
368       break;
369   }
370 
371   std::unique_ptr<ProducerEndpointImpl> endpoint(new ProducerEndpointImpl(
372       id, uid, this, task_runner_, producer, producer_name, sdk_version,
373       in_process, smb_scraping_enabled));
374   auto it_and_inserted = producers_.emplace(id, endpoint.get());
375   PERFETTO_DCHECK(it_and_inserted.second);
376   endpoint->shmem_size_hint_bytes_ = shared_memory_size_hint_bytes;
377   endpoint->shmem_page_size_hint_bytes_ = shared_memory_page_size_hint_bytes;
378 
379   // Producer::OnConnect() should run before Producer::OnTracingSetup(). The
380   // latter may be posted by SetupSharedMemory() below, so post OnConnect() now.
381   auto weak_ptr = endpoint->weak_ptr_factory_.GetWeakPtr();
382   task_runner_->PostTask([weak_ptr] {
383     if (weak_ptr)
384       weak_ptr->producer_->OnConnect();
385   });
386 
387   if (shm) {
388     // The producer supplied an SMB. This is used only by Chrome; in the most
389     // common cases the SMB is created by the service and passed via
390     // OnTracingSetup(). Verify that it is correctly sized before we attempt to
391     // use it. The transport layer has to verify the integrity of the SMB (e.g.
392     // ensure that the producer can't resize it after the fact).
393     size_t shm_size, page_size;
394     std::tie(shm_size, page_size) =
395         EnsureValidShmSizes(shm->size(), endpoint->shmem_page_size_hint_bytes_);
396     if (shm_size == shm->size() &&
397         page_size == endpoint->shmem_page_size_hint_bytes_) {
398       PERFETTO_DLOG(
399           "Adopting producer-provided SMB of %zu kB for producer \"%s\"",
400           shm_size / 1024, endpoint->name_.c_str());
401       endpoint->SetupSharedMemory(std::move(shm), page_size,
402                                   /*provided_by_producer=*/true);
403     } else {
404       PERFETTO_LOG(
405           "Discarding incorrectly sized producer-provided SMB for producer "
406           "\"%s\", falling back to service-provided SMB. Requested sizes: %zu "
407           "B total, %zu B page size; suggested corrected sizes: %zu B total, "
408           "%zu B page size",
409           endpoint->name_.c_str(), shm->size(),
410           endpoint->shmem_page_size_hint_bytes_, shm_size, page_size);
411       shm.reset();
412     }
413   }
414 
415   return std::unique_ptr<ProducerEndpoint>(std::move(endpoint));
416 }
417 
418 void TracingServiceImpl::DisconnectProducer(ProducerID id) {
419   PERFETTO_DCHECK_THREAD(thread_checker_);
420   PERFETTO_DLOG("Producer %" PRIu16 " disconnected", id);
421   PERFETTO_DCHECK(producers_.count(id));
422 
423   // Scrape remaining chunks for this producer to ensure we don't lose data.
424   if (auto* producer = GetProducer(id)) {
425     for (auto& session_id_and_session : tracing_sessions_)
426       ScrapeSharedMemoryBuffers(&session_id_and_session.second, producer);
427   }
428 
429   for (auto it = data_sources_.begin(); it != data_sources_.end();) {
430     auto next = it;
431     next++;
432     if (it->second.producer_id == id)
433       UnregisterDataSource(id, it->second.descriptor.name());
434     it = next;
435   }
436 
437   producers_.erase(id);
438   UpdateMemoryGuardrail();
439 }
440 
441 TracingServiceImpl::ProducerEndpointImpl* TracingServiceImpl::GetProducer(
442     ProducerID id) const {
443   PERFETTO_DCHECK_THREAD(thread_checker_);
444   auto it = producers_.find(id);
445   if (it == producers_.end())
446     return nullptr;
447   return it->second;
448 }
449 
450 std::unique_ptr<TracingService::ConsumerEndpoint>
451 TracingServiceImpl::ConnectConsumer(Consumer* consumer, uid_t uid) {
452   PERFETTO_DCHECK_THREAD(thread_checker_);
453   PERFETTO_DLOG("Consumer %p connected from UID %" PRIu64,
454                 reinterpret_cast<void*>(consumer), static_cast<uint64_t>(uid));
455   std::unique_ptr<ConsumerEndpointImpl> endpoint(
456       new ConsumerEndpointImpl(this, task_runner_, consumer, uid));
457   auto it_and_inserted = consumers_.emplace(endpoint.get());
458   PERFETTO_DCHECK(it_and_inserted.second);
459   // The consumer might go away before we're able to send the connect
460   // notification; if that is the case, just bail out.
461   auto weak_ptr = endpoint->weak_ptr_factory_.GetWeakPtr();
462   task_runner_->PostTask([weak_ptr] {
463     if (weak_ptr)
464       weak_ptr->consumer_->OnConnect();
465   });
466   return std::unique_ptr<ConsumerEndpoint>(std::move(endpoint));
467 }
468 
469 void TracingServiceImpl::DisconnectConsumer(ConsumerEndpointImpl* consumer) {
470   PERFETTO_DCHECK_THREAD(thread_checker_);
471   PERFETTO_DLOG("Consumer %p disconnected", reinterpret_cast<void*>(consumer));
472   PERFETTO_DCHECK(consumers_.count(consumer));
473 
474   // TODO(primiano): Check that this is safe (what happens if there are
475   // ReadBuffers() calls posted in the meantime? They need to become noop).
476   if (consumer->tracing_session_id_)
477     FreeBuffers(consumer->tracing_session_id_);  // Will also DisableTracing().
478   consumers_.erase(consumer);
479 
480   // At this point no more pointers to |consumer| should be around.
481   PERFETTO_DCHECK(!std::any_of(
482       tracing_sessions_.begin(), tracing_sessions_.end(),
483       [consumer](const std::pair<const TracingSessionID, TracingSession>& kv) {
484         return kv.second.consumer_maybe_null == consumer;
485       }));
486 }
487 
488 bool TracingServiceImpl::DetachConsumer(ConsumerEndpointImpl* consumer,
489                                         const std::string& key) {
490   PERFETTO_DCHECK_THREAD(thread_checker_);
491   PERFETTO_DLOG("Consumer %p detached", reinterpret_cast<void*>(consumer));
492   PERFETTO_DCHECK(consumers_.count(consumer));
493 
494   TracingSessionID tsid = consumer->tracing_session_id_;
495   TracingSession* tracing_session;
496   if (!tsid || !(tracing_session = GetTracingSession(tsid)))
497     return false;
498 
499   if (GetDetachedSession(consumer->uid_, key)) {
500     PERFETTO_ELOG("Another session has been detached with the same key \"%s\"",
501                   key.c_str());
502     return false;
503   }
504 
505   PERFETTO_DCHECK(tracing_session->consumer_maybe_null == consumer);
506   tracing_session->consumer_maybe_null = nullptr;
507   tracing_session->detach_key = key;
508   consumer->tracing_session_id_ = 0;
509   return true;
510 }
511 
512 bool TracingServiceImpl::AttachConsumer(ConsumerEndpointImpl* consumer,
513                                         const std::string& key) {
514   PERFETTO_DCHECK_THREAD(thread_checker_);
515   PERFETTO_DLOG("Consumer %p attaching to session %s",
516                 reinterpret_cast<void*>(consumer), key.c_str());
517   PERFETTO_DCHECK(consumers_.count(consumer));
518 
519   if (consumer->tracing_session_id_) {
520     PERFETTO_ELOG(
521         "Cannot reattach consumer to session %s"
522         " while it is already attached to tracing session ID %" PRIu64,
523         key.c_str(), consumer->tracing_session_id_);
524     return false;
525   }
526 
527   auto* tracing_session = GetDetachedSession(consumer->uid_, key);
528   if (!tracing_session) {
529     PERFETTO_ELOG(
530         "Failed to attach consumer, session '%s' not found for uid %d",
531         key.c_str(), static_cast<int>(consumer->uid_));
532     return false;
533   }
534 
535   consumer->tracing_session_id_ = tracing_session->id;
536   tracing_session->consumer_maybe_null = consumer;
537   tracing_session->detach_key.clear();
538   return true;
539 }
540 
541 base::Status TracingServiceImpl::EnableTracing(ConsumerEndpointImpl* consumer,
542                                                const TraceConfig& cfg,
543                                                base::ScopedFile fd) {
544   PERFETTO_DCHECK_THREAD(thread_checker_);
545   PERFETTO_DLOG("Enabling tracing for consumer %p",
546                 reinterpret_cast<void*>(consumer));
547   MaybeLogUploadEvent(cfg, PerfettoStatsdAtom::kTracedEnableTracing);
548   if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_SET)
549     lockdown_mode_ = true;
550   if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_CLEAR)
551     lockdown_mode_ = false;
552 
553   // Scope |tracing_session| to this block to prevent accidental use of a null
554   // pointer later in this function.
555   {
556     TracingSession* tracing_session =
557         GetTracingSession(consumer->tracing_session_id_);
558     if (tracing_session) {
559       MaybeLogUploadEvent(
560           cfg, PerfettoStatsdAtom::kTracedEnableTracingExistingTraceSession);
561       return PERFETTO_SVC_ERR(
562           "A Consumer is trying to EnableTracing() but another tracing "
563           "session is already active (forgot a call to FreeBuffers() ?)");
564     }
565   }
566 
567   const uint32_t max_duration_ms = cfg.enable_extra_guardrails()
568                                        ? kGuardrailsMaxTracingDurationMillis
569                                        : kMaxTracingDurationMillis;
570   if (cfg.duration_ms() > max_duration_ms) {
571     MaybeLogUploadEvent(cfg,
572                         PerfettoStatsdAtom::kTracedEnableTracingTooLongTrace);
573     return PERFETTO_SVC_ERR("Requested too long trace (%" PRIu32
574                             "ms  > %" PRIu32 " ms)",
575                             cfg.duration_ms(), max_duration_ms);
576   }
577 
578   const bool has_trigger_config = cfg.trigger_config().trigger_mode() !=
579                                   TraceConfig::TriggerConfig::UNSPECIFIED;
580   if (has_trigger_config && (cfg.trigger_config().trigger_timeout_ms() == 0 ||
581                              cfg.trigger_config().trigger_timeout_ms() >
582                                  kGuardrailsMaxTracingDurationMillis)) {
583     MaybeLogUploadEvent(
584         cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidTriggerTimeout);
585     return PERFETTO_SVC_ERR(
586         "Traces with START_TRACING triggers must provide a positive "
587         "trigger_timeout_ms < 7 days (received %" PRIu32 "ms)",
588         cfg.trigger_config().trigger_timeout_ms());
589   }
590 
591   if (has_trigger_config && cfg.duration_ms() != 0) {
592     MaybeLogUploadEvent(
593         cfg, PerfettoStatsdAtom::kTracedEnableTracingDurationWithTrigger);
594     return PERFETTO_SVC_ERR(
595         "duration_ms was set, this must not be set for traces with triggers.");
596   }
597 
598   if (cfg.trigger_config().trigger_mode() ==
599           TraceConfig::TriggerConfig::STOP_TRACING &&
600       cfg.write_into_file()) {
601     // We don't support this use case because there are subtle assumptions which
602     // break around TracingServiceEvents and windowed sorting (i.e. if we don't
603     // drain the events in ReadBuffers because we are waiting for STOP_TRACING,
604     // we can end up queueing up a lot of TracingServiceEvents and emitting them
605     // wildly out of order, breaking windowed sorting in trace processor).
606     MaybeLogUploadEvent(
607         cfg, PerfettoStatsdAtom::kTracedEnableTracingStopTracingWriteIntoFile);
608     return PERFETTO_SVC_ERR(
609         "Specifying trigger mode STOP_TRACING and write_into_file together is "
610         "unsupported");
611   }
612 
613   std::unordered_set<std::string> triggers;
614   for (const auto& trigger : cfg.trigger_config().triggers()) {
615     if (!triggers.insert(trigger.name()).second) {
616       MaybeLogUploadEvent(
617           cfg, PerfettoStatsdAtom::kTracedEnableTracingDuplicateTriggerName);
618       return PERFETTO_SVC_ERR("Duplicate trigger name: %s",
619                               trigger.name().c_str());
620     }
621   }
622 
623   if (cfg.enable_extra_guardrails()) {
624     if (cfg.deferred_start()) {
625       MaybeLogUploadEvent(
626           cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidDeferredStart);
627       return PERFETTO_SVC_ERR(
628           "deferred_start=true is not supported in unsupervised traces");
629     }
630     uint64_t buf_size_sum = 0;
631     for (const auto& buf : cfg.buffers()) {
632       if (buf.size_kb() % 4 != 0) {
633         MaybeLogUploadEvent(
634             cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidBufferSize);
635         return PERFETTO_SVC_ERR(
636             "buffers.size_kb must be a multiple of 4, got %" PRIu32,
637             buf.size_kb());
638       }
639       buf_size_sum += buf.size_kb();
640     }
641     if (buf_size_sum > kGuardrailsMaxTracingBufferSizeKb) {
642       MaybeLogUploadEvent(
643           cfg, PerfettoStatsdAtom::kTracedEnableTracingBufferSizeTooLarge);
644       return PERFETTO_SVC_ERR("Requested too large trace buffer (%" PRIu64
645                               "kB  > %" PRIu32 " kB)",
646                               buf_size_sum, kGuardrailsMaxTracingBufferSizeKb);
647     }
648   }
649 
650   if (cfg.buffers_size() > kMaxBuffersPerConsumer) {
651     MaybeLogUploadEvent(cfg,
652                         PerfettoStatsdAtom::kTracedEnableTracingTooManyBuffers);
653     return PERFETTO_SVC_ERR("Too many buffers configured (%d)",
654                             cfg.buffers_size());
655   }
656 
657   if (!cfg.unique_session_name().empty()) {
658     const std::string& name = cfg.unique_session_name();
659     for (auto& kv : tracing_sessions_) {
660       if (kv.second.config.unique_session_name() == name) {
661         MaybeLogUploadEvent(
662             cfg, PerfettoStatsdAtom::kTracedEnableTracingDuplicateSessionName);
663         static const char fmt[] =
664             "A trace with this unique session name (%s) already exists";
665         // This happens frequently, don't make it an "E"LOG.
666         PERFETTO_LOG(fmt, name.c_str());
667         return base::ErrStatus(fmt, name.c_str());
668       }
669     }
670   }
671 
672   if (cfg.enable_extra_guardrails()) {
673     // unique_session_name can be empty
674     const std::string& name = cfg.unique_session_name();
675     int64_t now_s = base::GetBootTimeS().count();
676 
677     // Remove any entries where the time limit has passed so this map doesn't
678     // grow indefinitely:
679     std::map<std::string, int64_t>& sessions = session_to_last_trace_s_;
680     for (auto it = sessions.cbegin(); it != sessions.cend();) {
681       if (now_s - it->second > kMinSecondsBetweenTracesGuardrail) {
682         it = sessions.erase(it);
683       } else {
684         ++it;
685       }
686     }
687 
688     int64_t& previous_s = session_to_last_trace_s_[name];
689     if (previous_s == 0) {
690       previous_s = now_s;
691     } else {
692       MaybeLogUploadEvent(
693           cfg, PerfettoStatsdAtom::kTracedEnableTracingSessionNameTooRecent);
694       return PERFETTO_SVC_ERR(
695           "A trace with unique session name \"%s\" began less than %" PRId64
696           "s ago (%" PRId64 "s)",
697           name.c_str(), kMinSecondsBetweenTracesGuardrail, now_s - previous_s);
698     }
699   }
700 
701   const int sessions_for_uid = static_cast<int>(std::count_if(
702       tracing_sessions_.begin(), tracing_sessions_.end(),
703       [consumer](const decltype(tracing_sessions_)::value_type& s) {
704         return s.second.consumer_uid == consumer->uid_;
705       }));
706 
707   int per_uid_limit = kMaxConcurrentTracingSessionsPerUid;
708   if (consumer->uid_ == 1066 /* AID_STATSD */) {
709     per_uid_limit = kMaxConcurrentTracingSessionsForStatsdUid;
710   }
711   if (sessions_for_uid >= per_uid_limit) {
712     MaybeLogUploadEvent(
713         cfg, PerfettoStatsdAtom::kTracedEnableTracingTooManySessionsForUid);
714     return PERFETTO_SVC_ERR(
715         "Too many concurrent tracing sessions (%d) for uid %d, limit is %d",
716         sessions_for_uid, static_cast<int>(consumer->uid_), per_uid_limit);
717   }
718 
719   // TODO(primiano): This is a workaround to prevent a producer from getting stuck
720   // in a state where it stalls by design by having more TraceWriterImpl
721   // instances than free pages in the buffer. This is really a bug in
722   // trace_probes and the way it handles stalls in the shmem buffer.
723   if (tracing_sessions_.size() >= kMaxConcurrentTracingSessions) {
724     MaybeLogUploadEvent(
725         cfg, PerfettoStatsdAtom::kTracedEnableTracingTooManyConcurrentSessions);
726     return PERFETTO_SVC_ERR("Too many concurrent tracing sessions (%zu)",
727                             tracing_sessions_.size());
728   }
729 
730   // If the trace config provides filter bytecode, set up the filter now.
731   // If the filter loading fails, abort the tracing session rather than running
732   // unfiltered.
733   std::unique_ptr<protozero::MessageFilter> trace_filter;
734   if (cfg.has_trace_filter()) {
735     const auto& filt = cfg.trace_filter();
736     const std::string& bytecode = filt.bytecode();
737     trace_filter.reset(new protozero::MessageFilter());
738     if (!trace_filter->LoadFilterBytecode(bytecode.data(), bytecode.size())) {
739       MaybeLogUploadEvent(
740           cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
741       return PERFETTO_SVC_ERR("Trace filter bytecode invalid, aborting");
742     }
743     // The filter is created using perfetto.protos.Trace as root message
744     // (because that makes it possible to play around with the `proto_filter`
745     // tool on actual traces). Here in the service, however, we deal with
746     // perfetto.protos.TracePacket(s), which are one level down (Trace.packet).
747     // The IPC client (or the write_into_file logic in here) is responsible
748     // for prepending the packet preamble (see GetProtoPreamble() calls), but
749     // the preamble is not there at ReadBuffer time. Hence we change the root of
750     // the filtering to start at the Trace.packet level.
751     uint32_t packet_field_id = TracePacket::kPacketFieldNumber;
752     if (!trace_filter->SetFilterRoot(&packet_field_id, 1)) {
753       MaybeLogUploadEvent(
754           cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
755       return PERFETTO_SVC_ERR("Failed to set filter root.");
756     }
757   }
758 
759   const TracingSessionID tsid = ++last_tracing_session_id_;
760   TracingSession* tracing_session =
761       &tracing_sessions_
762            .emplace(std::piecewise_construct, std::forward_as_tuple(tsid),
763                     std::forward_as_tuple(tsid, consumer, cfg, task_runner_))
764            .first->second;
765 
766   if (trace_filter)
767     tracing_session->trace_filter = std::move(trace_filter);
768 
769   if (cfg.write_into_file()) {
770     if (!fd ^ !cfg.output_path().empty()) {
771       tracing_sessions_.erase(tsid);
772       MaybeLogUploadEvent(
773           tracing_session->config,
774           PerfettoStatsdAtom::kTracedEnableTracingInvalidFdOutputFile);
775       return PERFETTO_SVC_ERR(
776           "When write_into_file==true either a FD needs to be passed or "
777           "output_path must be populated (but not both)");
778     }
779     if (!cfg.output_path().empty()) {
780       fd = CreateTraceFile(cfg.output_path(), /*overwrite=*/false);
781       if (!fd) {
782         MaybeLogUploadEvent(
783             tracing_session->config,
784             PerfettoStatsdAtom::kTracedEnableTracingFailedToCreateFile);
785         tracing_sessions_.erase(tsid);
786         return PERFETTO_SVC_ERR("Failed to create the trace file %s",
787                                 cfg.output_path().c_str());
788       }
789     }
790     tracing_session->write_into_file = std::move(fd);
791     uint32_t write_period_ms = cfg.file_write_period_ms();
792     if (write_period_ms == 0)
793       write_period_ms = kDefaultWriteIntoFilePeriodMs;
794     if (write_period_ms < min_write_period_ms_)
795       write_period_ms = min_write_period_ms_;
796     tracing_session->write_period_ms = write_period_ms;
797     tracing_session->max_file_size_bytes = cfg.max_file_size_bytes();
798     tracing_session->bytes_written_into_file = 0;
799   }
800 
801   // Initialize the log buffers.
802   bool did_allocate_all_buffers = true;
803 
804   // Allocate the trace buffers. Also create a map to translate a consumer
805   // relative index (TraceConfig.DataSourceConfig.target_buffer) into the
806   // corresponding BufferID, which is a global ID namespace for the service and
807   // all producers.
808   size_t total_buf_size_kb = 0;
809   const size_t num_buffers = static_cast<size_t>(cfg.buffers_size());
810   tracing_session->buffers_index.reserve(num_buffers);
811   for (size_t i = 0; i < num_buffers; i++) {
812     const TraceConfig::BufferConfig& buffer_cfg = cfg.buffers()[i];
813     BufferID global_id = buffer_ids_.Allocate();
814     if (!global_id) {
815       did_allocate_all_buffers = false;  // We ran out of IDs.
816       break;
817     }
818     tracing_session->buffers_index.push_back(global_id);
819     const size_t buf_size_bytes = buffer_cfg.size_kb() * 1024u;
820     total_buf_size_kb += buffer_cfg.size_kb();
821     TraceBuffer::OverwritePolicy policy =
822         buffer_cfg.fill_policy() == TraceConfig::BufferConfig::DISCARD
823             ? TraceBuffer::kDiscard
824             : TraceBuffer::kOverwrite;
825     auto it_and_inserted = buffers_.emplace(
826         global_id, TraceBuffer::Create(buf_size_bytes, policy));
827     PERFETTO_DCHECK(it_and_inserted.second);  // buffers_.count(global_id) == 0.
828     std::unique_ptr<TraceBuffer>& trace_buffer = it_and_inserted.first->second;
829     if (!trace_buffer) {
830       did_allocate_all_buffers = false;
831       break;
832     }
833   }
834 
835   UpdateMemoryGuardrail();
836 
837   // This can happen if either:
838   // - All the kMaxTraceBufferID slots are taken.
839   // - OOM, or, more realistically, we exhausted virtual memory.
840   // In any case, free all the previously allocated buffers and abort.
841   // TODO(fmayer): add a test to cover this case, this is quite subtle.
842   if (!did_allocate_all_buffers) {
843     for (BufferID global_id : tracing_session->buffers_index) {
844       buffer_ids_.Free(global_id);
845       buffers_.erase(global_id);
846     }
847     tracing_sessions_.erase(tsid);
848     MaybeLogUploadEvent(tracing_session->config,
849                         PerfettoStatsdAtom::kTracedEnableTracingOom);
850     return PERFETTO_SVC_ERR(
851         "Failed to allocate tracing buffers: OOM or too many buffers");
852   }
853 
854   consumer->tracing_session_id_ = tsid;
855 
856   // Setup the data sources on the producers without starting them.
857   for (const TraceConfig::DataSource& cfg_data_source : cfg.data_sources()) {
858     // Scan all the registered data sources with a matching name.
859     auto range = data_sources_.equal_range(cfg_data_source.config().name());
860     for (auto it = range.first; it != range.second; it++) {
861       TraceConfig::ProducerConfig producer_config;
862       for (auto& config : cfg.producers()) {
863         if (GetProducer(it->second.producer_id)->name_ ==
864             config.producer_name()) {
865           producer_config = config;
866           break;
867         }
868       }
869       SetupDataSource(cfg_data_source, producer_config, it->second,
870                       tracing_session);
871     }
872   }
873 
874   bool has_start_trigger = false;
875   auto weak_this = weak_ptr_factory_.GetWeakPtr();
876   switch (cfg.trigger_config().trigger_mode()) {
877     case TraceConfig::TriggerConfig::UNSPECIFIED:
878       // No triggers are specified, so this isn't a trace that uses triggers.
879       PERFETTO_DCHECK(!has_trigger_config);
880       break;
881     case TraceConfig::TriggerConfig::START_TRACING:
882       // For traces which use START_TRACING triggers we need to ensure that the
883       // tracing session will be cleaned up when it times out.
884       has_start_trigger = true;
885       task_runner_->PostDelayedTask(
886           [weak_this, tsid]() {
887             if (weak_this)
888               weak_this->OnStartTriggersTimeout(tsid);
889           },
890           cfg.trigger_config().trigger_timeout_ms());
891       break;
892     case TraceConfig::TriggerConfig::STOP_TRACING:
893       // Update the tracing_session's duration_ms to ensure that, if no trigger
894       // is received, the session will end and be cleaned up once the trigger
895       // timeout expires.
896       //
897       // TODO(nuskos): Refactor this so that rather than modifying the config we
898       // have a field we look at on the tracing_session.
899       tracing_session->config.set_duration_ms(
900           cfg.trigger_config().trigger_timeout_ms());
901       break;
902   }
903 
904   tracing_session->state = TracingSession::CONFIGURED;
905   PERFETTO_LOG(
906       "Configured tracing session %" PRIu64
907       ", #sources:%zu, duration:%d ms, #buffers:%d, total "
908       "buffer size:%zu KB, total sessions:%zu, uid:%d, session name: \"%s\"",
909       tsid, cfg.data_sources().size(), tracing_session->config.duration_ms(),
910       cfg.buffers_size(), total_buf_size_kb, tracing_sessions_.size(),
911       static_cast<unsigned int>(consumer->uid_),
912       cfg.unique_session_name().c_str());
913 
914   // Start the data sources, unless this is a case of early setup + fast
915   // triggering, either through TraceConfig.deferred_start or
916   // TraceConfig.trigger_config(). If both are specified, whichever one occurs
917   // first will initiate the trace.
918   if (!cfg.deferred_start() && !has_start_trigger)
919     return StartTracing(tsid);
920 
921   return base::OkStatus();
922 }
923 
924 void TracingServiceImpl::ChangeTraceConfig(ConsumerEndpointImpl* consumer,
925                                            const TraceConfig& updated_cfg) {
926   PERFETTO_DCHECK_THREAD(thread_checker_);
927   TracingSession* tracing_session =
928       GetTracingSession(consumer->tracing_session_id_);
929   PERFETTO_DCHECK(tracing_session);
930 
931   if ((tracing_session->state != TracingSession::STARTED) &&
932       (tracing_session->state != TracingSession::CONFIGURED)) {
933     PERFETTO_ELOG(
934         "ChangeTraceConfig() was called for a tracing session which isn't "
935         "running.");
936     return;
937   }
938 
939   // We only support updating producer_name_{,regex}_filter (and pass-through
940   // configs) for now; null out any changeable fields and make sure the rest are
941   // identical.
942   TraceConfig new_config_copy(updated_cfg);
943   for (auto& ds_cfg : *new_config_copy.mutable_data_sources()) {
944     ds_cfg.clear_producer_name_filter();
945     ds_cfg.clear_producer_name_regex_filter();
946   }
947 
948   TraceConfig current_config_copy(tracing_session->config);
949   for (auto& ds_cfg : *current_config_copy.mutable_data_sources()) {
950     ds_cfg.clear_producer_name_filter();
951     ds_cfg.clear_producer_name_regex_filter();
952   }
953 
954   if (new_config_copy != current_config_copy) {
955     PERFETTO_LOG(
956         "ChangeTraceConfig() was called with a config containing unsupported "
957         "changes; only adding to the producer_name_{,regex}_filter is "
958         "currently supported and will have an effect.");
959   }
960 
961   for (TraceConfig::DataSource& cfg_data_source :
962        *tracing_session->config.mutable_data_sources()) {
963     // Find the updated producer_filter in the new config.
964     std::vector<std::string> new_producer_name_filter;
965     std::vector<std::string> new_producer_name_regex_filter;
966     bool found_data_source = false;
967     for (const auto& it : updated_cfg.data_sources()) {
968       if (cfg_data_source.config().name() == it.config().name()) {
969         new_producer_name_filter = it.producer_name_filter();
970         new_producer_name_regex_filter = it.producer_name_regex_filter();
971         found_data_source = true;
972         break;
973       }
974     }
975 
976     // Bail out if data source not present in the new config.
977     if (!found_data_source) {
978       PERFETTO_ELOG(
979           "ChangeTraceConfig() called without a current data source also "
980           "present in the new config: %s",
981           cfg_data_source.config().name().c_str());
982       continue;
983     }
984 
985     // TODO(oysteine): Just replacing the filter means that if
986     // there are any filter entries which were present in the original config,
987     // but removed from the config passed to ChangeTraceConfig, any matching
988     // producers will keep producing but newly added producers after this
989     // point will never start.
990     *cfg_data_source.mutable_producer_name_filter() = new_producer_name_filter;
991     *cfg_data_source.mutable_producer_name_regex_filter() =
992         new_producer_name_regex_filter;
993 
994     // Scan all the registered data sources with a matching name.
995     auto range = data_sources_.equal_range(cfg_data_source.config().name());
996     for (auto it = range.first; it != range.second; it++) {
997       ProducerEndpointImpl* producer = GetProducer(it->second.producer_id);
998       PERFETTO_DCHECK(producer);
999 
1000       // Check if the producer name of this data source is present
1001       // in the name filters. We currently only support new filters, not
1002       // removing old ones.
1003       if (!NameMatchesFilter(producer->name_, new_producer_name_filter,
1004                              new_producer_name_regex_filter)) {
1005         continue;
1006       }
1007 
1008       bool already_setup = false;
1009       auto& ds_instances = tracing_session->data_source_instances;
1010       for (auto instance_it = ds_instances.begin();
1011            instance_it != ds_instances.end(); ++instance_it) {
1012         if (instance_it->first == it->second.producer_id &&
1013             instance_it->second.data_source_name ==
1014                 cfg_data_source.config().name()) {
1015           already_setup = true;
1016           break;
1017         }
1018       }
1019 
1020       if (already_setup)
1021         continue;
1022 
1023       // If it wasn't previously set up, set it up now.
1024       // (The per-producer config is optional).
1025       TraceConfig::ProducerConfig producer_config;
1026       for (auto& config : tracing_session->config.producers()) {
1027         if (producer->name_ == config.producer_name()) {
1028           producer_config = config;
1029           break;
1030         }
1031       }
1032 
1033       DataSourceInstance* ds_inst = SetupDataSource(
1034           cfg_data_source, producer_config, it->second, tracing_session);
1035 
1036       if (ds_inst && tracing_session->state == TracingSession::STARTED)
1037         StartDataSourceInstance(producer, tracing_session, ds_inst);
1038     }
1039   }
1040 }
1041 
1042 base::Status TracingServiceImpl::StartTracing(TracingSessionID tsid) {
1043   PERFETTO_DCHECK_THREAD(thread_checker_);
1044 
1045   auto weak_this = weak_ptr_factory_.GetWeakPtr();
1046   TracingSession* tracing_session = GetTracingSession(tsid);
1047   if (!tracing_session) {
1048     return PERFETTO_SVC_ERR(
1049         "StartTracing() failed, invalid session ID %" PRIu64, tsid);
1050   }
1051 
1052   MaybeLogUploadEvent(tracing_session->config,
1053                       PerfettoStatsdAtom::kTracedStartTracing);
1054 
1055   if (tracing_session->state != TracingSession::CONFIGURED) {
1056     MaybeLogUploadEvent(
1057         tracing_session->config,
1058         PerfettoStatsdAtom::kTracedStartTracingInvalidSessionState);
1059     return PERFETTO_SVC_ERR("StartTracing() failed, invalid session state: %d",
1060                             tracing_session->state);
1061   }
1062 
1063   tracing_session->state = TracingSession::STARTED;
1064 
1065   // We store the start-of-trace snapshot separately, as it's important to make
1066   // sure we can interpret all the data in the trace; storing it in the ring
1067   // buffer means it could be overwritten by a later snapshot.
1068   if (!tracing_session->config.builtin_data_sources()
1069            .disable_clock_snapshotting()) {
1070     SnapshotClocks(&tracing_session->initial_clock_snapshot);
1071   }
1072 
1073   // We don't snapshot the clocks here because we just did this above.
1074   SnapshotLifecyleEvent(
1075       tracing_session,
1076       protos::pbzero::TracingServiceEvent::kTracingStartedFieldNumber,
1077       false /* snapshot_clocks */);
1078 
1079   // Periodically snapshot clocks, stats, sync markers while the trace is
1080   // active. The snapshots are emitted on the future ReadBuffers() calls, which
1081   // means that:
1082   //  (a) If we're streaming to a file (or to a consumer) while tracing, we
1083   //      write snapshots periodically into the trace.
1084   //  (b) If ReadBuffers() is only called after tracing ends, we emit the latest
1085   //      snapshot into the trace. For clock snapshots, we keep track of the
1086   //      snapshot recorded at the beginning of the session
1087   //      (initial_clock_snapshot above), as well as the most recent sampled
1088   //      snapshots that showed significant new drift between different clocks.
1089   //      The latter clock snapshots are sampled periodically and at lifecycle
1090   //      events.
1091   base::PeriodicTask::Args snapshot_task_args;
1092   snapshot_task_args.start_first_task_immediately = true;
1093   snapshot_task_args.use_suspend_aware_timer =
1094       tracing_session->config.builtin_data_sources()
1095           .prefer_suspend_clock_for_snapshot();
1096   snapshot_task_args.task = [weak_this, tsid] {
1097     if (weak_this)
1098       weak_this->PeriodicSnapshotTask(tsid);
1099   };
1100   snapshot_task_args.period_ms =
1101       tracing_session->config.builtin_data_sources().snapshot_interval_ms();
1102   if (!snapshot_task_args.period_ms)
1103     snapshot_task_args.period_ms = kDefaultSnapshotsIntervalMs;
1104   tracing_session->snapshot_periodic_task.Start(snapshot_task_args);
1105 
1106   // Trigger delayed task if the trace is time limited.
1107   const uint32_t trace_duration_ms = tracing_session->config.duration_ms();
1108   if (trace_duration_ms > 0) {
1109     task_runner_->PostDelayedTask(
1110         [weak_this, tsid] {
1111           // Skip the flush entirely if the trace session doesn't exist anymore.
1112           // This is to prevent misleading error messages from being logged.
1113           if (!weak_this)
1114             return;
1115           auto* tracing_session_ptr = weak_this->GetTracingSession(tsid);
1116           if (!tracing_session_ptr)
1117             return;
1118           // If this trace was using STOP_TRACING triggers and we've seen
1119           // one, then the trigger overrides the normal timeout. In this
1120           // case we just return and let the other task clean up this trace.
1121           if (tracing_session_ptr->config.trigger_config().trigger_mode() ==
1122                   TraceConfig::TriggerConfig::STOP_TRACING &&
1123               !tracing_session_ptr->received_triggers.empty())
1124             return;
1125           // In all other cases (START_TRACING or no triggers) we flush
1126           // after |trace_duration_ms| unconditionally.
1127           weak_this->FlushAndDisableTracing(tsid);
1128         },
1129         trace_duration_ms);
1130   }
1131 
1132   // Start the periodic drain tasks if we need to save the trace into a file.
1133   if (tracing_session->config.write_into_file()) {
1134     task_runner_->PostDelayedTask(
1135         [weak_this, tsid] {
1136           if (weak_this)
1137             weak_this->ReadBuffers(tsid, nullptr);
1138         },
1139         tracing_session->delay_to_next_write_period_ms());
1140   }
1141 
1142   // Start the periodic flush tasks if the config specified a flush period.
1143   if (tracing_session->config.flush_period_ms())
1144     PeriodicFlushTask(tsid, /*post_next_only=*/true);
1145 
1146   // Start the periodic incremental state clear tasks if the config specified a
1147   // period.
1148   if (tracing_session->config.incremental_state_config().clear_period_ms()) {
1149     PeriodicClearIncrementalStateTask(tsid, /*post_next_only=*/true);
1150   }
1151 
1152   for (auto& kv : tracing_session->data_source_instances) {
1153     ProducerID producer_id = kv.first;
1154     DataSourceInstance& data_source = kv.second;
1155     ProducerEndpointImpl* producer = GetProducer(producer_id);
1156     if (!producer) {
1157       PERFETTO_DFATAL("Producer does not exist.");
1158       continue;
1159     }
1160     StartDataSourceInstance(producer, tracing_session, &data_source);
1161   }
1162 
1163   MaybeNotifyAllDataSourcesStarted(tracing_session);
1164   return base::OkStatus();
1165 }
1166 
1167 void TracingServiceImpl::StartDataSourceInstance(
1168     ProducerEndpointImpl* producer,
1169     TracingSession* tracing_session,
1170     TracingServiceImpl::DataSourceInstance* instance) {
1171   PERFETTO_DCHECK(instance->state == DataSourceInstance::CONFIGURED);
1172   if (instance->will_notify_on_start) {
1173     instance->state = DataSourceInstance::STARTING;
1174   } else {
1175     instance->state = DataSourceInstance::STARTED;
1176   }
1177   if (tracing_session->consumer_maybe_null) {
1178     tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
1179         *producer, *instance);
1180   }
1181   producer->StartDataSource(instance->instance_id, instance->config);
1182 
1183   // If all data sources are started, notify the consumer.
1184   if (instance->state == DataSourceInstance::STARTED)
1185     MaybeNotifyAllDataSourcesStarted(tracing_session);
1186 }
1187 
1188 // DisableTracing just stops the data sources but doesn't free up any buffer.
1189 // This is to allow the consumer to freeze the buffers (by stopping the trace)
1190 // and then drain the buffers. The actual teardown of the TracingSession happens
1191 // in FreeBuffers().
1192 void TracingServiceImpl::DisableTracing(TracingSessionID tsid,
1193                                         bool disable_immediately) {
1194   PERFETTO_DCHECK_THREAD(thread_checker_);
1195   TracingSession* tracing_session = GetTracingSession(tsid);
1196   if (!tracing_session) {
1197     // Can happen if the consumer calls this before EnableTracing() or after
1198     // FreeBuffers().
1199     PERFETTO_DLOG("DisableTracing() failed, invalid session ID %" PRIu64, tsid);
1200     return;
1201   }
1202 
1203   MaybeLogUploadEvent(tracing_session->config,
1204                       PerfettoStatsdAtom::kTracedDisableTracing);
1205 
1206   switch (tracing_session->state) {
1207     // Spurious call to DisableTracing() while already disabled, nothing to do.
1208     case TracingSession::DISABLED:
1209       PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
1210       return;
1211 
1212     // This is either:
1213     // A) The case of a graceful DisableTracing() call followed by a call to
1214     //    FreeBuffers(), iff |disable_immediately| == true. In this case we want
1215     //    to forcefully transition in the disabled state without waiting for the
1216     //    outstanding acks because the buffers are going to be destroyed soon.
1217     // B) A spurious call, iff |disable_immediately| == false, in which case
1218     //    there is nothing to do.
1219     case TracingSession::DISABLING_WAITING_STOP_ACKS:
1220       PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
1221       if (disable_immediately)
1222         DisableTracingNotifyConsumerAndFlushFile(tracing_session);
1223       return;
1224 
1225     // Continues below.
1226     case TracingSession::CONFIGURED:
1227       // If the session didn't even start there is no need to orchestrate a
1228       // graceful stop of data sources.
1229       disable_immediately = true;
1230       break;
1231 
1232     // This is the nominal case, continues below.
1233     case TracingSession::STARTED:
1234       break;
1235   }
1236 
1237   for (auto& data_source_inst : tracing_session->data_source_instances) {
1238     const ProducerID producer_id = data_source_inst.first;
1239     DataSourceInstance& instance = data_source_inst.second;
1240     ProducerEndpointImpl* producer = GetProducer(producer_id);
1241     PERFETTO_DCHECK(producer);
1242     PERFETTO_DCHECK(instance.state == DataSourceInstance::CONFIGURED ||
1243                     instance.state == DataSourceInstance::STARTING ||
1244                     instance.state == DataSourceInstance::STARTED);
1245     StopDataSourceInstance(producer, tracing_session, &instance,
1246                            disable_immediately);
1247   }
1248 
1249   // If the periodic task is running, we can stop the periodic snapshot timer
1250   // here instead of waiting until FreeBuffers to prevent useless snapshots
1251   // which won't be read.
1252   tracing_session->snapshot_periodic_task.Reset();
1253 
1254   // Either this request is flagged with |disable_immediately| or there are no
1255   // data sources that are requesting a final handshake. In both cases just mark
1256   // the session as disabled immediately, notify the consumer and flush the
1257   // trace file (if used).
1258   if (tracing_session->AllDataSourceInstancesStopped())
1259     return DisableTracingNotifyConsumerAndFlushFile(tracing_session);
1260 
1261   tracing_session->state = TracingSession::DISABLING_WAITING_STOP_ACKS;
1262   auto weak_this = weak_ptr_factory_.GetWeakPtr();
1263   task_runner_->PostDelayedTask(
1264       [weak_this, tsid] {
1265         if (weak_this)
1266           weak_this->OnDisableTracingTimeout(tsid);
1267       },
1268       tracing_session->data_source_stop_timeout_ms());
1269 
1270   // Deliberately NOT removing the session from |tracing_sessions_|, it's still
1271   // needed to call ReadBuffers(). FreeBuffers() will erase() the session.
1272 }
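
// The overall disable flow: DisableTracing() calls StopDataSourceInstance()
// for every instance in the session and then waits, in the
// DISABLING_WAITING_STOP_ACKS state, for each producer to ack via
// NotifyDataSourceStopped(). Once all instances are stopped, or once
// OnDisableTracingTimeout() fires after data_source_stop_timeout_ms(), the
// session is finalized by DisableTracingNotifyConsumerAndFlushFile().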
1273 
1274 void TracingServiceImpl::NotifyDataSourceStarted(
1275     ProducerID producer_id,
1276     DataSourceInstanceID instance_id) {
1277   PERFETTO_DCHECK_THREAD(thread_checker_);
1278   for (auto& kv : tracing_sessions_) {
1279     TracingSession& tracing_session = kv.second;
1280     DataSourceInstance* instance =
1281         tracing_session.GetDataSourceInstance(producer_id, instance_id);
1282 
1283     if (!instance)
1284       continue;
1285 
1286     // If the tracing session was already stopped, ignore this notification.
1287     if (tracing_session.state != TracingSession::STARTED)
1288       continue;
1289 
1290     if (instance->state != DataSourceInstance::STARTING) {
1291       PERFETTO_ELOG("Started data source instance in incorrect state: %d",
1292                     instance->state);
1293       continue;
1294     }
1295 
1296     instance->state = DataSourceInstance::STARTED;
1297 
1298     ProducerEndpointImpl* producer = GetProducer(producer_id);
1299     PERFETTO_DCHECK(producer);
1300     if (tracing_session.consumer_maybe_null) {
1301       tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
1302           *producer, *instance);
1303     }
1304 
1305     // If all data sources are started, notify the consumer.
1306     MaybeNotifyAllDataSourcesStarted(&tracing_session);
1307   }  // for (tracing_session)
1308 }
1309 
1310 void TracingServiceImpl::MaybeNotifyAllDataSourcesStarted(
1311     TracingSession* tracing_session) {
1312   if (!tracing_session->consumer_maybe_null)
1313     return;
1314 
1315   if (!tracing_session->AllDataSourceInstancesStarted())
1316     return;
1317 
1318   // In some rare cases, we can get in this state more than once. Consider the
1319   // following scenario: 3 data sources are registered -> trace starts ->
1320   // all 3 data sources ack -> OnAllDataSourcesStarted() is called.
1321   // Imagine now that a 4th data source registers while the trace is ongoing.
1322   // This would hit the AllDataSourceInstancesStarted() condition again.
1323   // In this case, however, we don't want to re-notify the consumer again.
1324   // That would be unexpected (even if, perhaps, technically correct) and
1325   // trigger bugs in the consumer.
1326   if (tracing_session->did_notify_all_data_source_started)
1327     return;
1328 
1329   PERFETTO_DLOG("All data sources started");
1330 
1331   SnapshotLifecyleEvent(
1332       tracing_session,
1333       protos::pbzero::TracingServiceEvent::kAllDataSourcesStartedFieldNumber,
1334       true /* snapshot_clocks */);
1335 
1336   tracing_session->did_notify_all_data_source_started = true;
1337   tracing_session->consumer_maybe_null->OnAllDataSourcesStarted();
1338 }
1339 
1340 void TracingServiceImpl::NotifyDataSourceStopped(
1341     ProducerID producer_id,
1342     DataSourceInstanceID instance_id) {
1343   PERFETTO_DCHECK_THREAD(thread_checker_);
1344   for (auto& kv : tracing_sessions_) {
1345     TracingSession& tracing_session = kv.second;
1346     DataSourceInstance* instance =
1347         tracing_session.GetDataSourceInstance(producer_id, instance_id);
1348 
1349     if (!instance)
1350       continue;
1351 
1352     if (instance->state != DataSourceInstance::STOPPING) {
1353       PERFETTO_ELOG("Stopped data source instance in incorrect state: %d",
1354                     instance->state);
1355       continue;
1356     }
1357 
1358     instance->state = DataSourceInstance::STOPPED;
1359 
1360     ProducerEndpointImpl* producer = GetProducer(producer_id);
1361     PERFETTO_DCHECK(producer);
1362     if (tracing_session.consumer_maybe_null) {
1363       tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
1364           *producer, *instance);
1365     }
1366 
1367     if (!tracing_session.AllDataSourceInstancesStopped())
1368       continue;
1369 
1370     if (tracing_session.state != TracingSession::DISABLING_WAITING_STOP_ACKS)
1371       continue;
1372 
1373     // All data sources acked the termination.
1374     DisableTracingNotifyConsumerAndFlushFile(&tracing_session);
1375   }  // for (tracing_session)
1376 }
1377 
1378 void TracingServiceImpl::ActivateTriggers(
1379     ProducerID producer_id,
1380     const std::vector<std::string>& triggers) {
1381   PERFETTO_DCHECK_THREAD(thread_checker_);
1382   auto* producer = GetProducer(producer_id);
1383   PERFETTO_DCHECK(producer);
1384 
1385   int64_t now_ns = base::GetBootTimeNs().count();
1386   for (const auto& trigger_name : triggers) {
1387     PERFETTO_DLOG("Received ActivateTriggers request for \"%s\"",
1388                   trigger_name.c_str());
1389     base::Hash hash;
1390     hash.Update(trigger_name.c_str(), trigger_name.size());
1391 
1392     uint64_t trigger_name_hash = hash.digest();
1393     size_t count_in_window =
1394         PurgeExpiredAndCountTriggerInWindow(now_ns, trigger_name_hash);
1395 
1396     bool trigger_applied = false;
1397     for (auto& id_and_tracing_session : tracing_sessions_) {
1398       auto& tracing_session = id_and_tracing_session.second;
1399       TracingSessionID tsid = id_and_tracing_session.first;
1400       auto iter = std::find_if(
1401           tracing_session.config.trigger_config().triggers().begin(),
1402           tracing_session.config.trigger_config().triggers().end(),
1403           [&trigger_name](const TraceConfig::TriggerConfig::Trigger& trigger) {
1404             return trigger.name() == trigger_name;
1405           });
1406       if (iter == tracing_session.config.trigger_config().triggers().end()) {
1407         continue;
1408       }
1409 
1410       // If this trigger requires a certain producer to have sent it
1411       // (non-empty producer_name_regex()), ensure the producer who sent this
1412       // trigger matches.
1413       if (!iter->producer_name_regex().empty() &&
1414           !std::regex_match(
1415               producer->name_,
1416               std::regex(iter->producer_name_regex(), std::regex::extended))) {
1417         continue;
1418       }
1419 
1420       // Use a random number between 0 and 1 to check if we should allow this
1421       // trigger through or not.
1422       double trigger_rnd =
1423           trigger_rnd_override_for_testing_ > 0
1424               ? trigger_rnd_override_for_testing_
1425               : trigger_probability_dist_(trigger_probability_rand_);
1426       PERFETTO_DCHECK(trigger_rnd >= 0 && trigger_rnd < 1);
1427       if (trigger_rnd < iter->skip_probability()) {
1428         MaybeLogTriggerEvent(tracing_session.config,
1429                              PerfettoTriggerAtom::kTracedLimitProbability,
1430                              trigger_name);
1431         continue;
1432       }
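      // Worked example of the check above: with skip_probability = 0.75, the
      // trigger is dropped whenever trigger_rnd (uniform in [0, 1)) falls
      // below 0.75, i.e. roughly three out of four activations are ignored
      // and only ~25% go on to start or stop the session.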
1433 
1434       // If we already triggered more times than the limit, silently ignore
1435       // this trigger.
1436       if (iter->max_per_24_h() > 0 && count_in_window >= iter->max_per_24_h()) {
1437         MaybeLogTriggerEvent(tracing_session.config,
1438                              PerfettoTriggerAtom::kTracedLimitMaxPer24h,
1439                              trigger_name);
1440         continue;
1441       }
1442       trigger_applied = true;
1443 
1444       const bool triggers_already_received =
1445           !tracing_session.received_triggers.empty();
1446       tracing_session.received_triggers.push_back(
1447           {static_cast<uint64_t>(now_ns), iter->name(), producer->name_,
1448            producer->uid_});
1449       auto weak_this = weak_ptr_factory_.GetWeakPtr();
1450       switch (tracing_session.config.trigger_config().trigger_mode()) {
1451         case TraceConfig::TriggerConfig::START_TRACING:
1452           // If the session has already been triggered and moved past
1453           // CONFIGURED then we don't need to repeat StartTracing. This would
1454           // work fine (StartTracing would return false) but would add error
1455           // logs.
1456           if (tracing_session.state != TracingSession::CONFIGURED)
1457             break;
1458 
1459           PERFETTO_DLOG("Triggering '%s' on tracing session %" PRIu64
1460                         " with duration of %" PRIu32 "ms.",
1461                         iter->name().c_str(), tsid, iter->stop_delay_ms());
1462           MaybeLogUploadEvent(tracing_session.config,
1463                               PerfettoStatsdAtom::kTracedTriggerStartTracing,
1464                               iter->name());
1465 
1466           // We override the trace duration to be the trigger's requested
1467           // value, this ensures that the trace will end after this amount
1468           // of time has passed.
1469           tracing_session.config.set_duration_ms(iter->stop_delay_ms());
1470           StartTracing(tsid);
1471           break;
1472         case TraceConfig::TriggerConfig::STOP_TRACING:
1473           // Only stop the trace once to avoid confusing log messages, i.e.
1474           // once we've hit the first trigger we've already posted the task to
1475           // FlushAndDisableTracing(), so all future triggers just break out.
1477           if (triggers_already_received)
1478             break;
1479 
1480           PERFETTO_DLOG("Triggering '%s' on tracing session %" PRIu64
1481                         " with duration of %" PRIu32 "ms.",
1482                         iter->name().c_str(), tsid, iter->stop_delay_ms());
1483           MaybeLogUploadEvent(tracing_session.config,
1484                               PerfettoStatsdAtom::kTracedTriggerStopTracing,
1485                               iter->name());
1486 
1487           // Now that we've seen a trigger we need to stop, flush, and disable
1488           // this session after the configured |stop_delay_ms|.
1489           task_runner_->PostDelayedTask(
1490               [weak_this, tsid] {
1491                 // Skip entirely the flush if the trace session doesn't exist
1492                 // anymore. This is to prevent misleading error messages to be
1493                 // logged.
1494                 if (weak_this && weak_this->GetTracingSession(tsid))
1495                   weak_this->FlushAndDisableTracing(tsid);
1496               },
1497               // If the trigger's stop_delay_ms is zero this task becomes
1498               // executable immediately and will run shortly.
1499               iter->stop_delay_ms());
1500           break;
1501         case TraceConfig::TriggerConfig::UNSPECIFIED:
1502           PERFETTO_ELOG("Trigger activated but trigger mode unspecified.");
1503           break;
1504       }
1505     }  // for (.. : tracing_sessions_)
1506 
1507     if (trigger_applied) {
1508       trigger_history_.emplace_back(TriggerHistory{now_ns, trigger_name_hash});
1509     }
1510   }
1511 }
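
// For illustration only: a hedged sketch (not part of this file) of how a
// consumer-side TraceConfig could declare a STOP_TRACING trigger that the
// loop above would match. The setter names follow the generated C++ config
// API and should be treated as assumptions, not quotes from this file.
//
//   TraceConfig cfg;
//   auto* trigger_cfg = cfg.mutable_trigger_config();
//   trigger_cfg->set_trigger_mode(TraceConfig::TriggerConfig::STOP_TRACING);
//   trigger_cfg->set_trigger_timeout_ms(30000);
//   auto* trigger = trigger_cfg->add_triggers();
//   trigger->set_name("my_trigger");    // Matched against |trigger_name|.
//   trigger->set_stop_delay_ms(1000);   // Delay before FlushAndDisableTracing.
//   trigger->set_max_per_24_h(10);      // Rate limit checked above.
//
// A producer would then fire it via its endpoint's
// ActivateTriggers({"my_trigger"}), which routes to the method above.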
1512 
1513 // Always invoked kDataSourceStopTimeoutMs after DisableTracing(). In nominal
1514 // conditions all data sources should have acked the stop and this will early
1515 // out.
1516 void TracingServiceImpl::OnDisableTracingTimeout(TracingSessionID tsid) {
1517   PERFETTO_DCHECK_THREAD(thread_checker_);
1518   TracingSession* tracing_session = GetTracingSession(tsid);
1519   if (!tracing_session ||
1520       tracing_session->state != TracingSession::DISABLING_WAITING_STOP_ACKS) {
1521     return;  // Tracing session was successfully disabled.
1522   }
1523 
1524   PERFETTO_ILOG("Timeout while waiting for ACKs for tracing session %" PRIu64,
1525                 tsid);
1526   PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
1527   DisableTracingNotifyConsumerAndFlushFile(tracing_session);
1528 }
1529 
1530 void TracingServiceImpl::DisableTracingNotifyConsumerAndFlushFile(
1531     TracingSession* tracing_session) {
1532   PERFETTO_DCHECK(tracing_session->state != TracingSession::DISABLED);
1533   for (auto& inst_kv : tracing_session->data_source_instances) {
1534     if (inst_kv.second.state == DataSourceInstance::STOPPED)
1535       continue;
1536     inst_kv.second.state = DataSourceInstance::STOPPED;
1537     ProducerEndpointImpl* producer = GetProducer(inst_kv.first);
1538     PERFETTO_DCHECK(producer);
1539     if (tracing_session->consumer_maybe_null) {
1540       tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
1541           *producer, inst_kv.second);
1542     }
1543   }
1544   tracing_session->state = TracingSession::DISABLED;
1545 
1546   // Scrape any remaining chunks that weren't flushed by the producers.
1547   for (auto& producer_id_and_producer : producers_)
1548     ScrapeSharedMemoryBuffers(tracing_session, producer_id_and_producer.second);
1549 
1550   SnapshotLifecyleEvent(
1551       tracing_session,
1552       protos::pbzero::TracingServiceEvent::kTracingDisabledFieldNumber,
1553       true /* snapshot_clocks */);
1554 
1555   if (tracing_session->write_into_file) {
1556     tracing_session->write_period_ms = 0;
1557     ReadBuffers(tracing_session->id, nullptr);
1558   }
1559 
1560   if (tracing_session->on_disable_callback_for_bugreport) {
1561     std::move(tracing_session->on_disable_callback_for_bugreport)();
1562     tracing_session->on_disable_callback_for_bugreport = nullptr;
1563   }
1564 
1565   MaybeLogUploadEvent(tracing_session->config,
1566                       PerfettoStatsdAtom::kTracedNotifyTracingDisabled);
1567 
1568   if (tracing_session->consumer_maybe_null)
1569     tracing_session->consumer_maybe_null->NotifyOnTracingDisabled("");
1570 }
1571 
1572 void TracingServiceImpl::Flush(TracingSessionID tsid,
1573                                uint32_t timeout_ms,
1574                                ConsumerEndpoint::FlushCallback callback) {
1575   PERFETTO_DCHECK_THREAD(thread_checker_);
1576   TracingSession* tracing_session = GetTracingSession(tsid);
1577   if (!tracing_session) {
1578     PERFETTO_DLOG("Flush() failed, invalid session ID %" PRIu64, tsid);
1579     return;
1580   }
1581 
1582   if (!timeout_ms)
1583     timeout_ms = tracing_session->flush_timeout_ms();
1584 
1585   if (tracing_session->pending_flushes.size() > 1000) {
1586     PERFETTO_ELOG("Too many flushes (%zu) pending for the tracing session",
1587                   tracing_session->pending_flushes.size());
1588     callback(false);
1589     return;
1590   }
1591 
1592   FlushRequestID flush_request_id = ++last_flush_request_id_;
1593   PendingFlush& pending_flush =
1594       tracing_session->pending_flushes
1595           .emplace_hint(tracing_session->pending_flushes.end(),
1596                         flush_request_id, PendingFlush(std::move(callback)))
1597           ->second;
1598 
1599   // Send a flush request to each producer involved in the tracing session. In
1600   // order to issue a flush request we have to build a map of all data source
1601   // instance ids enabled for each producer.
1602   std::map<ProducerID, std::vector<DataSourceInstanceID>> flush_map;
1603   for (const auto& data_source_inst : tracing_session->data_source_instances) {
1604     const ProducerID producer_id = data_source_inst.first;
1605     const DataSourceInstanceID ds_inst_id = data_source_inst.second.instance_id;
1606     flush_map[producer_id].push_back(ds_inst_id);
1607   }
1608 
1609   for (const auto& kv : flush_map) {
1610     ProducerID producer_id = kv.first;
1611     ProducerEndpointImpl* producer = GetProducer(producer_id);
1612     const std::vector<DataSourceInstanceID>& data_sources = kv.second;
1613     producer->Flush(flush_request_id, data_sources);
1614     pending_flush.producers.insert(producer_id);
1615   }
1616 
1617   // If there are no producers to flush (realistically this happens only in
1618   // some tests) fire OnFlushTimeout() straight away, without waiting.
1619   if (flush_map.empty())
1620     timeout_ms = 0;
1621 
1622   auto weak_this = weak_ptr_factory_.GetWeakPtr();
1623   task_runner_->PostDelayedTask(
1624       [weak_this, tsid, flush_request_id] {
1625         if (weak_this)
1626           weak_this->OnFlushTimeout(tsid, flush_request_id);
1627       },
1628       timeout_ms);
1629 }
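
// A minimal consumer-side usage sketch (assumption: the public
// ConsumerEndpoint::Flush(timeout_ms, callback) entry point ultimately routes
// here), kept as a comment since it is not part of the service itself:
//
//   consumer_endpoint->Flush(5000 /* timeout_ms */, [](bool success) {
//     PERFETTO_LOG("Flush %s", success ? "succeeded" : "timed out");
//   });
//
// Passing a timeout of 0 falls back to the session's flush_timeout_ms(), as
// handled at the top of Flush() above.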
1630 
1631 void TracingServiceImpl::NotifyFlushDoneForProducer(
1632     ProducerID producer_id,
1633     FlushRequestID flush_request_id) {
1634   for (auto& kv : tracing_sessions_) {
1635     // Remove all pending flushes <= |flush_request_id| for |producer_id|.
1636     auto& pending_flushes = kv.second.pending_flushes;
1637     auto end_it = pending_flushes.upper_bound(flush_request_id);
1638     for (auto it = pending_flushes.begin(); it != end_it;) {
1639       PendingFlush& pending_flush = it->second;
1640       pending_flush.producers.erase(producer_id);
1641       if (pending_flush.producers.empty()) {
1642         auto weak_this = weak_ptr_factory_.GetWeakPtr();
1643         TracingSessionID tsid = kv.first;
1644         auto callback = std::move(pending_flush.callback);
1645         task_runner_->PostTask([weak_this, tsid, callback]() {
1646           if (weak_this) {
1647             weak_this->CompleteFlush(tsid, std::move(callback),
1648                                      /*success=*/true);
1649           }
1650         });
1651         it = pending_flushes.erase(it);
1652       } else {
1653         it++;
1654       }
1655     }  // for (pending_flushes)
1656   }    // for (tracing_session)
1657 }
1658 
1659 void TracingServiceImpl::OnFlushTimeout(TracingSessionID tsid,
1660                                         FlushRequestID flush_request_id) {
1661   TracingSession* tracing_session = GetTracingSession(tsid);
1662   if (!tracing_session)
1663     return;
1664   auto it = tracing_session->pending_flushes.find(flush_request_id);
1665   if (it == tracing_session->pending_flushes.end())
1666     return;  // Nominal case: flush was completed and acked on time.
1667 
1668   // If there were no producers to flush, consider it a success.
1669   bool success = it->second.producers.empty();
1670 
1671   auto callback = std::move(it->second.callback);
1672   tracing_session->pending_flushes.erase(it);
1673   CompleteFlush(tsid, std::move(callback), success);
1674 }
1675 
1676 void TracingServiceImpl::CompleteFlush(TracingSessionID tsid,
1677                                        ConsumerEndpoint::FlushCallback callback,
1678                                        bool success) {
1679   TracingSession* tracing_session = GetTracingSession(tsid);
1680   if (!tracing_session) {
1681     callback(false);
1682     return;
1683   }
1684   // Producers may not have been able to flush all their data, even if they
1685   // indicated flush completion. If possible, also collect uncommitted chunks
1686   // to make sure we have everything they wrote so far.
1687   for (auto& producer_id_and_producer : producers_) {
1688     ScrapeSharedMemoryBuffers(tracing_session, producer_id_and_producer.second);
1689   }
1690   SnapshotLifecyleEvent(
1691       tracing_session,
1692       protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
1693       true /* snapshot_clocks */);
1694   callback(success);
1695 }
1696 
1697 void TracingServiceImpl::ScrapeSharedMemoryBuffers(
1698     TracingSession* tracing_session,
1699     ProducerEndpointImpl* producer) {
1700   if (!producer->smb_scraping_enabled_)
1701     return;
1702 
1703   // Can't copy chunks if we don't know about any trace writers.
1704   if (producer->writers_.empty())
1705     return;
1706 
1707   // Performance optimization: On flush or session disconnect, this method is
1708   // called for each producer. If the producer doesn't participate in the
1709   // session, there's no need to scrape its chunks right now. We can tell if a
1710   // producer participates in the session by checking if the producer is allowed
1711   // to write into the session's log buffers.
1712   const auto& session_buffers = tracing_session->buffers_index;
1713   bool producer_in_session =
1714       std::any_of(session_buffers.begin(), session_buffers.end(),
1715                   [producer](BufferID buffer_id) {
1716                     return producer->allowed_target_buffers_.count(buffer_id);
1717                   });
1718   if (!producer_in_session)
1719     return;
1720 
1721   PERFETTO_DLOG("Scraping SMB for producer %" PRIu16, producer->id_);
1722 
1723   // Find and copy any uncommitted chunks from the SMB.
1724   //
1725   // In nominal conditions, the page layout of the used SMB pages should never
1726   // change because the service is the only one who is supposed to modify used
1727   // pages (to make them free again).
1728   //
1729   // However, the code here needs to deal with the case of a malicious producer
1730   // altering the SMB in unpredictable ways. Thankfully the SMB size is
1731   // immutable, so a chunk will always point to some valid memory, even if the
1732   // producer alters the intended layout and chunk header concurrently.
1733   // Ultimately a malicious producer altering the SMB's chunk layout while we
1734   // are iterating in this function is not any different from the case of a
1735   // malicious producer asking to commit a chunk made of random data, which is
1736   // something this class has to deal with regardless.
1737   //
1738   // The only legitimate mutations that can happen from sane producers,
1739   // concurrently to this function, are:
1740   //   A. free pages being partitioned,
1741   //   B. free chunks being migrated to kChunkBeingWritten,
1742   //   C. kChunkBeingWritten chunks being migrated to kChunkCompleted.
1743 
1744   SharedMemoryABI* abi = &producer->shmem_abi_;
1745   // num_pages() is immutable after the SMB is initialized and cannot be
1746   // changed even by a malicious producer.
1747   for (size_t page_idx = 0; page_idx < abi->num_pages(); page_idx++) {
1748     uint32_t layout = abi->GetPageLayout(page_idx);
1749 
1750     uint32_t used_chunks = abi->GetUsedChunks(layout);  // Returns a bitmap.
1751     // Skip empty pages.
1752     if (used_chunks == 0)
1753       continue;
1754 
1755     // Scrape the chunks that are currently used. These should be either in
1756     // state kChunkBeingWritten or kChunkComplete.
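    // For example, used_chunks == 0b0101 means chunks 0 and 2 of this page are
    // in use: the loop shifts the bitmap right once per iteration and skips
    // the indexes whose bit is cleared.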
1757     for (uint32_t chunk_idx = 0; used_chunks; chunk_idx++, used_chunks >>= 1) {
1758       if (!(used_chunks & 1))
1759         continue;
1760 
1761       SharedMemoryABI::ChunkState state =
1762           SharedMemoryABI::GetChunkStateFromLayout(layout, chunk_idx);
1763       PERFETTO_DCHECK(state == SharedMemoryABI::kChunkBeingWritten ||
1764                       state == SharedMemoryABI::kChunkComplete);
1765       bool chunk_complete = state == SharedMemoryABI::kChunkComplete;
1766 
1767       SharedMemoryABI::Chunk chunk =
1768           abi->GetChunkUnchecked(page_idx, layout, chunk_idx);
1769 
1770       uint16_t packet_count;
1771       uint8_t flags;
1772       // GetPacketCountAndFlags has acquire_load semantics.
1773       std::tie(packet_count, flags) = chunk.GetPacketCountAndFlags();
1774 
1775       // It only makes sense to copy an incomplete chunk if there's at least
1776       // one full packet available. (The producer may not have completed the
1777       // last packet in it yet, so we need at least 2.)
1778       if (!chunk_complete && packet_count < 2)
1779         continue;
1780 
1781       // At this point, it is safe to access the remaining header fields of
1782       // the chunk. Even if the chunk was only just transferred from
1783       // kChunkFree into kChunkBeingWritten state, the header should be
1784       // written completely once the packet count increased above 1 (it was
1785       // reset to 0 by the service when the chunk was freed).
1786 
1787       WriterID writer_id = chunk.writer_id();
1788       base::Optional<BufferID> target_buffer_id =
1789           producer->buffer_id_for_writer(writer_id);
1790 
1791       // We can only scrape this chunk if we know which log buffer to copy it
1792       // into.
1793       if (!target_buffer_id)
1794         continue;
1795 
1796       // Skip chunks that don't belong to the requested tracing session.
1797       bool target_buffer_belongs_to_session =
1798           std::find(session_buffers.begin(), session_buffers.end(),
1799                     *target_buffer_id) != session_buffers.end();
1800       if (!target_buffer_belongs_to_session)
1801         continue;
1802 
1803       uint32_t chunk_id =
1804           chunk.header()->chunk_id.load(std::memory_order_relaxed);
1805 
1806       CopyProducerPageIntoLogBuffer(
1807           producer->id_, producer->uid_, writer_id, chunk_id, *target_buffer_id,
1808           packet_count, flags, chunk_complete, chunk.payload_begin(),
1809           chunk.payload_size());
1810     }
1811   }
1812 }
1813 
1814 void TracingServiceImpl::FlushAndDisableTracing(TracingSessionID tsid) {
1815   PERFETTO_DCHECK_THREAD(thread_checker_);
1816   PERFETTO_DLOG("Triggering final flush for %" PRIu64, tsid);
1817   auto weak_this = weak_ptr_factory_.GetWeakPtr();
1818   Flush(tsid, 0, [weak_this, tsid](bool success) {
1819     // This was a DLOG up to Jun 2021 (v16, Android S).
1820     PERFETTO_LOG("FlushAndDisableTracing(%" PRIu64 ") done, success=%d", tsid,
1821                  success);
1822     if (!weak_this)
1823       return;
1824     TracingSession* session = weak_this->GetTracingSession(tsid);
    // The session may have been freed (e.g. via FreeBuffers()) while the flush
    // was in flight; bail out to avoid dereferencing a null pointer.
    if (!session)
      return;
1825     if (session->consumer_maybe_null) {
1826       // If the consumer is still attached, just disable the session but give it
1827       // a chance to read the contents.
1828       weak_this->DisableTracing(tsid);
1829     } else {
1830       // If the consumer detached, destroy the session. If the consumer did
1831       // start the session in long-tracing mode, the service will have saved
1832       // the contents to the passed file. If not, the contents will be
1833       // destroyed.
1834       weak_this->FreeBuffers(tsid);
1835     }
1836   });
1837 }
1838 
1839 void TracingServiceImpl::PeriodicFlushTask(TracingSessionID tsid,
1840                                            bool post_next_only) {
1841   PERFETTO_DCHECK_THREAD(thread_checker_);
1842   TracingSession* tracing_session = GetTracingSession(tsid);
1843   if (!tracing_session || tracing_session->state != TracingSession::STARTED)
1844     return;
1845 
1846   uint32_t flush_period_ms = tracing_session->config.flush_period_ms();
1847   auto weak_this = weak_ptr_factory_.GetWeakPtr();
1848   task_runner_->PostDelayedTask(
1849       [weak_this, tsid] {
1850         if (weak_this)
1851           weak_this->PeriodicFlushTask(tsid, /*post_next_only=*/false);
1852       },
1853       flush_period_ms - static_cast<uint32_t>(base::GetWallTimeMs().count() %
1854                                               flush_period_ms));
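  // The delay above aligns periodic flushes to multiples of flush_period_ms on
  // the wall clock rather than to the session start time. E.g. with
  // flush_period_ms = 10000 and GetWallTimeMs() % 10000 == 3456, the next
  // flush is posted in 10000 - 3456 = 6544 ms, i.e. at the next 10 s boundary.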
1855 
1856   if (post_next_only)
1857     return;
1858 
1859   PERFETTO_DLOG("Triggering periodic flush for trace session %" PRIu64, tsid);
1860   Flush(tsid, 0, [](bool success) {
1861     if (!success)
1862       PERFETTO_ELOG("Periodic flush timed out");
1863   });
1864 }
1865 
1866 void TracingServiceImpl::PeriodicClearIncrementalStateTask(
1867     TracingSessionID tsid,
1868     bool post_next_only) {
1869   PERFETTO_DCHECK_THREAD(thread_checker_);
1870   TracingSession* tracing_session = GetTracingSession(tsid);
1871   if (!tracing_session || tracing_session->state != TracingSession::STARTED)
1872     return;
1873 
1874   uint32_t clear_period_ms =
1875       tracing_session->config.incremental_state_config().clear_period_ms();
1876   auto weak_this = weak_ptr_factory_.GetWeakPtr();
1877   task_runner_->PostDelayedTask(
1878       [weak_this, tsid] {
1879         if (weak_this)
1880           weak_this->PeriodicClearIncrementalStateTask(
1881               tsid, /*post_next_only=*/false);
1882       },
1883       clear_period_ms - static_cast<uint32_t>(base::GetWallTimeMs().count() %
1884                                               clear_period_ms));
1885 
1886   if (post_next_only)
1887     return;
1888 
1889   PERFETTO_DLOG(
1890       "Performing periodic incremental state clear for trace session %" PRIu64,
1891       tsid);
1892 
1893   // Queue the IPCs to producers with active data sources that opted in.
1894   std::map<ProducerID, std::vector<DataSourceInstanceID>> clear_map;
1895   for (const auto& kv : tracing_session->data_source_instances) {
1896     ProducerID producer_id = kv.first;
1897     const DataSourceInstance& data_source = kv.second;
1898     if (data_source.handles_incremental_state_clear)
1899       clear_map[producer_id].push_back(data_source.instance_id);
1900   }
1901 
1902   for (const auto& kv : clear_map) {
1903     ProducerID producer_id = kv.first;
1904     const std::vector<DataSourceInstanceID>& data_sources = kv.second;
1905     ProducerEndpointImpl* producer = GetProducer(producer_id);
1906     if (!producer) {
1907       PERFETTO_DFATAL("Producer does not exist.");
1908       continue;
1909     }
1910     producer->ClearIncrementalState(data_sources);
1911   }
1912 }
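
// Data sources receive ClearIncrementalState() only if they opted in. A
// hedged producer-side sketch follows; the setter name is assumed from the
// DataSourceDescriptor proto field (handles_incremental_state_clear) that the
// instance flag above mirrors, so treat it as illustrative only:
//
//   DataSourceDescriptor dsd;
//   dsd.set_name("com.example.incremental_source");
//   dsd.set_handles_incremental_state_clear(true);
//   producer_endpoint->RegisterDataSource(dsd);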
1913 
1914 // Note: when this is called to write into a file passed when starting tracing
1915 // |consumer| will be == nullptr (as opposite to the case of a consumer asking
1916 // to send the trace data back over IPC).
1917 bool TracingServiceImpl::ReadBuffers(TracingSessionID tsid,
1918                                      ConsumerEndpointImpl* consumer) {
1919   PERFETTO_DCHECK_THREAD(thread_checker_);
1920   TracingSession* tracing_session = GetTracingSession(tsid);
1921   if (!tracing_session) {
1922     // This will be hit systematically from the PostDelayedTask when directly
1923     // writing into the file (in which case consumer == nullptr). Suppress the
1924     // log in this case as it's just spam.
1925     if (consumer) {
1926       PERFETTO_DLOG("Cannot ReadBuffers(): no tracing session is active");
1927     }
1928     return false;
1929   }
1930 
1931   // When a tracing session is waiting for a trigger it is considered empty. If
1932   // a tracing session finishes and moves into DISABLED without ever receiving a
1933   // trigger the trace should never return any data. This includes the synthetic
1934   // packets like TraceConfig and Clock snapshots. So we bail out early and let
1935   // the consumer know there is no data.
1936   if (!tracing_session->config.trigger_config().triggers().empty() &&
1937       tracing_session->received_triggers.empty() &&
1938       !tracing_session->seized_for_bugreport) {
1939     PERFETTO_DLOG(
1940         "ReadBuffers(): tracing session has not received a trigger yet.");
1941     return false;
1942   }
1943 
1944   // This can happen if the file is closed by a previous task because it reaches
1945   // |max_file_size_bytes|.
1946   if (!tracing_session->write_into_file && !consumer)
1947     return false;
1948 
1949   if (tracing_session->write_into_file && consumer) {
1950     // If the consumer enabled tracing and asked to save the contents into the
1951     // passed file, it makes little sense to also try to read the buffers over
1952     // IPC, as that would just steal data from the periodic draining task.
1953     PERFETTO_ELOG("Consumer trying to read from write_into_file session.");
1954     return false;
1955   }
1956 
1957   std::vector<TracePacket> packets;
1958   packets.reserve(1024);  // Just an educated guess to avoid trivial expansions.
1959 
1960   // If a bugreport request happened and the trace was stolen for that, give
1961   // an empty trace with a clear signal to the consumer. This deals only with
1962   // the case of readback-from-IPC. A similar code-path deals with the
1963   // write_into_file case in MaybeSaveTraceForBugreport().
1964   if (tracing_session->seized_for_bugreport && consumer) {
1965     if (!tracing_session->config.builtin_data_sources()
1966              .disable_service_events()) {
1967       EmitSeizedForBugreportLifecycleEvent(&packets);
1968     }
1969     EmitLifecycleEvents(tracing_session, &packets);
1970     consumer->consumer_->OnTraceData(std::move(packets), /*has_more=*/false);
1971     return true;
1972   }
1973 
1974   if (!tracing_session->initial_clock_snapshot.empty()) {
1975     EmitClockSnapshot(tracing_session,
1976                       std::move(tracing_session->initial_clock_snapshot),
1977                       &packets);
1978   }
1979 
1980   for (auto& snapshot : tracing_session->clock_snapshot_ring_buffer) {
1981     PERFETTO_DCHECK(!snapshot.empty());
1982     EmitClockSnapshot(tracing_session, std::move(snapshot), &packets);
1983   }
1984   tracing_session->clock_snapshot_ring_buffer.clear();
1985 
1986   if (tracing_session->should_emit_sync_marker) {
1987     EmitSyncMarker(&packets);
1988     tracing_session->should_emit_sync_marker = false;
1989   }
1990 
1991   if (!tracing_session->config.builtin_data_sources().disable_trace_config()) {
1992     MaybeEmitTraceConfig(tracing_session, &packets);
1993     MaybeEmitReceivedTriggers(tracing_session, &packets);
1994   }
1995   if (!tracing_session->config.builtin_data_sources().disable_system_info())
1996     MaybeEmitSystemInfo(tracing_session, &packets);
1997 
1998   // Note that in the proto comment, we guarantee that the tracing_started
1999   // lifecycle event will be emitted before any data packets so make sure to
2000   // keep this before reading the tracing buffers.
2001   if (!tracing_session->config.builtin_data_sources().disable_service_events())
2002     EmitLifecycleEvents(tracing_session, &packets);
2003 
2004   size_t packets_bytes = 0;  // SUM(slice.size() for each slice in |packets|).
2005   size_t total_slices = 0;   // SUM(#slices in |packets|).
2006 
2007   // Add up size for packets added by the Maybe* calls above.
2008   for (const TracePacket& packet : packets) {
2009     packets_bytes += packet.size();
2010     total_slices += packet.slices().size();
2011   }
2012 
2013   // This is a rough threshold to determine how much to read from the buffer in
2014   // each task. This is to avoid executing a single huge sending task for too
2015   // long and risk to hit the watchdog. This is *not* an upper bound: we just
2016   // stop accumulating new packets and PostTask *after* we cross this threshold.
2017   // This constant essentially balances the PostTask and IPC overhead vs the
2018   // responsiveness of the service. An extremely small value will cause one IPC
2019   // and one PostTask for each slice but will keep the service extremely
2020   // responsive. An extremely large value will batch the send for the full
2021   // buffer in one large task, will hit the blocking send() once the socket
2022   // buffers are full and hang the service for a bit (until the consumer
2023   // catches up).
2024   static constexpr size_t kApproxBytesPerTask = 32768;
2025   bool did_hit_threshold = false;
2026 
2027   // TODO(primiano): Extend the ReadBuffers API to allow reading only some
2028   // buffers, not all of them in one go.
2029   for (size_t buf_idx = 0;
2030        buf_idx < tracing_session->num_buffers() && !did_hit_threshold;
2031        buf_idx++) {
2032     auto tbuf_iter = buffers_.find(tracing_session->buffers_index[buf_idx]);
2033     if (tbuf_iter == buffers_.end()) {
2034       PERFETTO_DFATAL("Buffer not found.");
2035       continue;
2036     }
2037     TraceBuffer& tbuf = *tbuf_iter->second;
2038     tbuf.BeginRead();
2039     while (!did_hit_threshold) {
2040       TracePacket packet;
2041       TraceBuffer::PacketSequenceProperties sequence_properties{};
2042       bool previous_packet_dropped;
2043       if (!tbuf.ReadNextTracePacket(&packet, &sequence_properties,
2044                                     &previous_packet_dropped)) {
2045         break;
2046       }
2047       PERFETTO_DCHECK(sequence_properties.producer_id_trusted != 0);
2048       PERFETTO_DCHECK(sequence_properties.writer_id != 0);
2049       PERFETTO_DCHECK(sequence_properties.producer_uid_trusted != kInvalidUid);
2050       PERFETTO_DCHECK(packet.size() > 0);
2051       if (!PacketStreamValidator::Validate(packet.slices())) {
2052         tracing_session->invalid_packets++;
2053         PERFETTO_DLOG("Dropping invalid packet");
2054         continue;
2055       }
2056 
2057       // Append a slice with the trusted field data. This can't be spoofed
2058       // because above we validated that the existing slices don't contain any
2059       // trusted fields. For added safety we append instead of prepending
2060       // because according to protobuf semantics, if the same field is
2061       // encountered multiple times the last instance takes priority. Note that
2062       // truncated packets are also rejected, so the producer can't give us a
2063       // partial packet (e.g., a truncated string) which only becomes valid when
2064       // the trusted data is appended here.
2065       Slice slice = Slice::Allocate(32);
2066       protozero::StaticBuffered<protos::pbzero::TracePacket> trusted_packet(
2067           slice.own_data(), slice.size);
2068       trusted_packet->set_trusted_uid(
2069           static_cast<int32_t>(sequence_properties.producer_uid_trusted));
2070       trusted_packet->set_trusted_packet_sequence_id(
2071           tracing_session->GetPacketSequenceID(
2072               sequence_properties.producer_id_trusted,
2073               sequence_properties.writer_id));
2074       if (previous_packet_dropped)
2075         trusted_packet->set_previous_packet_dropped(previous_packet_dropped);
2076       slice.size = trusted_packet.Finalize();
2077       packet.AddSlice(std::move(slice));
2078 
2079       // Append the packet (inclusive of the trusted uid) to |packets|.
2080       packets_bytes += packet.size();
2081       total_slices += packet.slices().size();
2082       did_hit_threshold = packets_bytes >= kApproxBytesPerTask &&
2083                           !tracing_session->write_into_file;
2084       packets.emplace_back(std::move(packet));
2085     }  // for(packets...)
2086   }    // for(buffers...)
2087 
2088   const bool has_more = did_hit_threshold;
2089 
2090   size_t prev_packets_size = packets.size();
2091   if (!tracing_session->config.builtin_data_sources()
2092            .disable_service_events()) {
2093     // We don't bother snapshotting clocks here because we wouldn't be able to
2094     // emit it and we shouldn't have significant drift from the last snapshot in
2095     // any case.
2096     SnapshotLifecyleEvent(tracing_session,
2097                           protos::pbzero::TracingServiceEvent::
2098                               kReadTracingBuffersCompletedFieldNumber,
2099                           false /* snapshot_clocks */);
2100     EmitLifecycleEvents(tracing_session, &packets);
2101   }
2102 
2103   // Only emit the stats when no more trace data is available to read.
2104   // That way, any problems that occur while reading from the buffers are
2105   // reflected in the emitted stats. This is particularly important for use
2106   // cases where ReadBuffers is only ever called after the tracing session is
2107   // stopped.
2108   if (!has_more && tracing_session->should_emit_stats) {
2109     EmitStats(tracing_session, &packets);
2110     tracing_session->should_emit_stats = false;
2111   }
2112 
2113   // Add sizes of packets emitted by the EmitLifecycleEvents + EmitStats.
2114   for (size_t i = prev_packets_size; i < packets.size(); ++i) {
2115     packets_bytes += packets[i].size();
2116     total_slices += packets[i].slices().size();
2117   }
2118 
2119   // +-------------------------------------------------------------------------+
2120   // | NO MORE CHANGES TO |packets| AFTER THIS POINT.                          |
2121   // +-------------------------------------------------------------------------+
2122 
2123   // If the tracing session specified a filter, run all packets through the
2124   // filter and replace them with the filter results.
2125   // The process below maintains the cardinality of input packets. Even if an
2126   // entire packet is filtered out, we emit a zero-sized TracePacket proto. That
2127   // makes debugging and reasoning about the trace stats easier.
2128   // This place swaps the contents of each |packets| entry in place.
2129   if (tracing_session->trace_filter) {
2130     auto& trace_filter = *tracing_session->trace_filter;
2131     // The filter root should be reset from protos.Trace to protos.TracePacket
2132     // by the earlier call to SetFilterRoot() in EnableTracing().
2133     PERFETTO_DCHECK(trace_filter.root_msg_index() != 0);
2134     std::vector<protozero::MessageFilter::InputSlice> filter_input;
2135     for (auto it = packets.begin(); it != packets.end(); ++it) {
2136       const auto& packet_slices = it->slices();
2137       filter_input.clear();
2138       filter_input.resize(packet_slices.size());
2139       ++tracing_session->filter_input_packets;
2140       tracing_session->filter_input_bytes += it->size();
2141       for (size_t i = 0; i < packet_slices.size(); ++i)
2142         filter_input[i] = {packet_slices[i].start, packet_slices[i].size};
2143       auto filtered_packet = trace_filter.FilterMessageFragments(
2144           &filter_input[0], filter_input.size());
2145 
2146       // Replace the packet in-place with the filtered one (unless failed).
2147       *it = TracePacket();
2148       if (filtered_packet.error) {
2149         ++tracing_session->filter_errors;
2150         PERFETTO_DLOG("Trace packet filtering failed @ packet %" PRIu64,
2151                       tracing_session->filter_input_packets);
2152         continue;
2153       }
2154       tracing_session->filter_output_bytes += filtered_packet.size;
2155       it->AddSlice(Slice::TakeOwnership(std::move(filtered_packet.data),
2156                                         filtered_packet.size));
2157 
2158     }  // for (packet)
2159   }    // if (trace_filter)
2160 
2161   // If the caller asked us to write into a file by setting
2162   // |write_into_file| == true in the trace config, drain the packets read
2163   // (if any) into the given file descriptor.
2164   if (tracing_session->write_into_file) {
2165     const uint64_t max_size = tracing_session->max_file_size_bytes
2166                                   ? tracing_session->max_file_size_bytes
2167                                   : std::numeric_limits<size_t>::max();
2168 
2169     // When writing into a file, the file should look like a root trace.proto
2170     // message. Each packet should be prepended with a proto preamble stating
2171     // its field id (within trace.proto) and size. Hence the addition below.
2172     const size_t max_iovecs = total_slices + packets.size();
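    // E.g. a packet made of 3 slices contributes 4 iovecs: one for its proto
    // preamble (field id and length varints) plus one per slice.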
2173 
2174     size_t num_iovecs = 0;
2175     bool stop_writing_into_file = tracing_session->write_period_ms == 0;
2176     std::unique_ptr<struct iovec[]> iovecs(new struct iovec[max_iovecs]);
2177     size_t num_iovecs_at_last_packet = 0;
2178     uint64_t bytes_about_to_be_written = 0;
2179     for (TracePacket& packet : packets) {
2180       std::tie(iovecs[num_iovecs].iov_base, iovecs[num_iovecs].iov_len) =
2181           packet.GetProtoPreamble();
2182       bytes_about_to_be_written += iovecs[num_iovecs].iov_len;
2183       num_iovecs++;
2184       for (const Slice& slice : packet.slices()) {
2185         // writev() doesn't change the passed pointer. However, struct iovec
2186         // takes a non-const ptr because it's the same struct used by readv().
2187         // Hence the const_cast here.
2188         char* start = static_cast<char*>(const_cast<void*>(slice.start));
2189         bytes_about_to_be_written += slice.size;
2190         iovecs[num_iovecs++] = {start, slice.size};
2191       }
2192 
2193       if (tracing_session->bytes_written_into_file +
2194               bytes_about_to_be_written >=
2195           max_size) {
2196         stop_writing_into_file = true;
2197         num_iovecs = num_iovecs_at_last_packet;
2198         break;
2199       }
2200 
2201       num_iovecs_at_last_packet = num_iovecs;
2202     }
2203     PERFETTO_DCHECK(num_iovecs <= max_iovecs);
2204     int fd = *tracing_session->write_into_file;
2205 
2206     uint64_t total_wr_size = 0;
2207 
2208     // writev() can take at most IOV_MAX entries per call. Batch them.
2209     constexpr size_t kIOVMax = IOV_MAX;
2210     for (size_t i = 0; i < num_iovecs; i += kIOVMax) {
2211       int iov_batch_size = static_cast<int>(std::min(num_iovecs - i, kIOVMax));
2212       ssize_t wr_size = PERFETTO_EINTR(writev(fd, &iovecs[i], iov_batch_size));
2213       if (wr_size <= 0) {
2214         PERFETTO_PLOG("writev() failed");
2215         stop_writing_into_file = true;
2216         break;
2217       }
2218       total_wr_size += static_cast<size_t>(wr_size);
2219     }
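    // Example of the batching above: with num_iovecs == 2500 and
    // IOV_MAX == 1024, writev() is invoked three times with 1024, 1024 and
    // 452 entries respectively.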
2220 
2221     tracing_session->bytes_written_into_file += total_wr_size;
2222 
2223     PERFETTO_DLOG("Draining into file, written: %" PRIu64 " KB, stop: %d",
2224                   (total_wr_size + 1023) / 1024, stop_writing_into_file);
2225     if (stop_writing_into_file) {
2226       // Ensure all data was written to the file before we close it.
2227       base::FlushFile(fd);
2228       tracing_session->write_into_file.reset();
2229       tracing_session->write_period_ms = 0;
2230       if (tracing_session->state == TracingSession::STARTED)
2231         DisableTracing(tsid);
2232       return true;
2233     }
2234 
2235     auto weak_this = weak_ptr_factory_.GetWeakPtr();
2236     task_runner_->PostDelayedTask(
2237         [weak_this, tsid] {
2238           if (weak_this)
2239             weak_this->ReadBuffers(tsid, nullptr);
2240         },
2241         tracing_session->delay_to_next_write_period_ms());
2242     return true;
2243   }  // if (tracing_session->write_into_file)
2244 
2245   if (has_more) {
2246     auto weak_consumer = consumer->weak_ptr_factory_.GetWeakPtr();
2247     auto weak_this = weak_ptr_factory_.GetWeakPtr();
2248     task_runner_->PostTask([weak_this, weak_consumer, tsid] {
2249       if (!weak_this || !weak_consumer)
2250         return;
2251       weak_this->ReadBuffers(tsid, weak_consumer.get());
2252     });
2253   }
2254 
2255   // Keep this as tail call, just in case the consumer re-enters.
2256   consumer->consumer_->OnTraceData(std::move(packets), has_more);
2257   return true;
2258 }
2259 
2260 void TracingServiceImpl::FreeBuffers(TracingSessionID tsid) {
2261   PERFETTO_DCHECK_THREAD(thread_checker_);
2262   PERFETTO_DLOG("Freeing buffers for session %" PRIu64, tsid);
2263   TracingSession* tracing_session = GetTracingSession(tsid);
2264   if (!tracing_session) {
2265     PERFETTO_DLOG("FreeBuffers() failed, invalid session ID %" PRIu64, tsid);
2266     return;  // TODO(primiano): signal failure?
2267   }
2268   DisableTracing(tsid, /*disable_immediately=*/true);
2269 
2270   PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
2271   tracing_session->data_source_instances.clear();
2272 
2273   for (auto& producer_entry : producers_) {
2274     ProducerEndpointImpl* producer = producer_entry.second;
2275     producer->OnFreeBuffers(tracing_session->buffers_index);
2276   }
2277 
2278   for (BufferID buffer_id : tracing_session->buffers_index) {
2279     buffer_ids_.Free(buffer_id);
2280     PERFETTO_DCHECK(buffers_.count(buffer_id) == 1);
2281     buffers_.erase(buffer_id);
2282   }
2283   bool notify_traceur = tracing_session->config.notify_traceur();
2284   bool is_long_trace =
2285       (tracing_session->config.write_into_file() &&
2286        tracing_session->config.file_write_period_ms() < kMillisPerDay);
2287   bool seized_for_bugreport = tracing_session->seized_for_bugreport;
2288   tracing_sessions_.erase(tsid);
2289   tracing_session = nullptr;
2290   UpdateMemoryGuardrail();
2291 
2292   PERFETTO_LOG("Tracing session %" PRIu64 " ended, total sessions:%zu", tsid,
2293                tracing_sessions_.size());
2294 
2295 #if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD) && \
2296     PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
2297   if (notify_traceur && (seized_for_bugreport || is_long_trace)) {
2298     PERFETTO_LAZY_LOAD(android_internal::NotifyTraceSessionEnded, notify_fn);
2299     if (!notify_fn || !notify_fn(seized_for_bugreport))
2300       PERFETTO_ELOG("Failed to notify Traceur long tracing has ended");
2301   }
2302 #else
2303   base::ignore_result(notify_traceur);
2304   base::ignore_result(is_long_trace);
2305   base::ignore_result(seized_for_bugreport);
2306 #endif
2307 }
2308 
2309 void TracingServiceImpl::RegisterDataSource(ProducerID producer_id,
2310                                             const DataSourceDescriptor& desc) {
2311   PERFETTO_DCHECK_THREAD(thread_checker_);
2312   PERFETTO_DLOG("Producer %" PRIu16 " registered data source \"%s\"",
2313                 producer_id, desc.name().c_str());
2314 
2315   PERFETTO_DCHECK(!desc.name().empty());
2316   auto reg_ds = data_sources_.emplace(desc.name(),
2317                                       RegisteredDataSource{producer_id, desc});
2318 
2319   // If there are existing tracing sessions, we need to check if the new
2320   // data source is enabled by any of them.
2321   if (tracing_sessions_.empty())
2322     return;
2323 
2324   ProducerEndpointImpl* producer = GetProducer(producer_id);
2325   if (!producer) {
2326     PERFETTO_DFATAL("Producer not found.");
2327     return;
2328   }
2329 
2330   for (auto& iter : tracing_sessions_) {
2331     TracingSession& tracing_session = iter.second;
2332     if (tracing_session.state != TracingSession::STARTED &&
2333         tracing_session.state != TracingSession::CONFIGURED) {
2334       continue;
2335     }
2336 
2337     TraceConfig::ProducerConfig producer_config;
2338     for (auto& config : tracing_session.config.producers()) {
2339       if (producer->name_ == config.producer_name()) {
2340         producer_config = config;
2341         break;
2342       }
2343     }
2344     for (const TraceConfig::DataSource& cfg_data_source :
2345          tracing_session.config.data_sources()) {
2346       if (cfg_data_source.config().name() != desc.name())
2347         continue;
2348       DataSourceInstance* ds_inst = SetupDataSource(
2349           cfg_data_source, producer_config, reg_ds->second, &tracing_session);
2350       if (ds_inst && tracing_session.state == TracingSession::STARTED)
2351         StartDataSourceInstance(producer, &tracing_session, ds_inst);
2352     }
2353   }
2354 }
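
// Producer-side counterpart, as a hedged sketch (names assumed from the
// public ProducerEndpoint / DataSourceDescriptor API, not quoted from this
// file). Registering while a matching session is already STARTED causes the
// SetupDataSource() + StartDataSourceInstance() path above to run right away:
//
//   DataSourceDescriptor dsd;
//   dsd.set_name("com.example.my_source");
//   dsd.set_will_notify_on_start(true);
//   dsd.set_will_notify_on_stop(true);
//   producer_endpoint->RegisterDataSource(dsd);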
2355 
2356 void TracingServiceImpl::StopDataSourceInstance(ProducerEndpointImpl* producer,
2357                                                 TracingSession* tracing_session,
2358                                                 DataSourceInstance* instance,
2359                                                 bool disable_immediately) {
2360   const DataSourceInstanceID ds_inst_id = instance->instance_id;
2361   if (instance->will_notify_on_stop && !disable_immediately) {
2362     instance->state = DataSourceInstance::STOPPING;
2363   } else {
2364     instance->state = DataSourceInstance::STOPPED;
2365   }
2366   if (tracing_session->consumer_maybe_null) {
2367     tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
2368         *producer, *instance);
2369   }
2370   producer->StopDataSource(ds_inst_id);
2371 }
2372 
2373 void TracingServiceImpl::UnregisterDataSource(ProducerID producer_id,
2374                                               const std::string& name) {
2375   PERFETTO_DCHECK_THREAD(thread_checker_);
2376   PERFETTO_DLOG("Producer %" PRIu16 " unregistered data source \"%s\"",
2377                 producer_id, name.c_str());
2378   PERFETTO_CHECK(producer_id);
2379   ProducerEndpointImpl* producer = GetProducer(producer_id);
2380   PERFETTO_DCHECK(producer);
2381   for (auto& kv : tracing_sessions_) {
2382     auto& ds_instances = kv.second.data_source_instances;
2383     bool removed = false;
2384     for (auto it = ds_instances.begin(); it != ds_instances.end();) {
2385       if (it->first == producer_id && it->second.data_source_name == name) {
2386         DataSourceInstanceID ds_inst_id = it->second.instance_id;
2387         if (it->second.state != DataSourceInstance::STOPPED) {
2388           if (it->second.state != DataSourceInstance::STOPPING) {
2389             StopDataSourceInstance(producer, &kv.second, &it->second,
2390                                    /* disable_immediately = */ false);
2391           }
2392 
2393           // Mark the instance as stopped immediately, since we are
2394           // unregistering it below.
2395           //
2396           //  The StopDataSourceInstance above might have set the state to
2397           //  STOPPING so this condition isn't an else.
2398           if (it->second.state == DataSourceInstance::STOPPING)
2399             NotifyDataSourceStopped(producer_id, ds_inst_id);
2400         }
2401         it = ds_instances.erase(it);
2402         removed = true;
2403       } else {
2404         ++it;
2405       }
2406     }  // for (data_source_instances)
2407     if (removed)
2408       MaybeNotifyAllDataSourcesStarted(&kv.second);
2409   }  // for (tracing_session)
2410 
2411   for (auto it = data_sources_.begin(); it != data_sources_.end(); ++it) {
2412     if (it->second.producer_id == producer_id &&
2413         it->second.descriptor.name() == name) {
2414       data_sources_.erase(it);
2415       return;
2416     }
2417   }
2418 
2419   PERFETTO_DFATAL(
2420       "Tried to unregister a non-existent data source \"%s\" for "
2421       "producer %" PRIu16,
2422       name.c_str(), producer_id);
2423 }
2424 
2425 TracingServiceImpl::DataSourceInstance* TracingServiceImpl::SetupDataSource(
2426     const TraceConfig::DataSource& cfg_data_source,
2427     const TraceConfig::ProducerConfig& producer_config,
2428     const RegisteredDataSource& data_source,
2429     TracingSession* tracing_session) {
2430   PERFETTO_DCHECK_THREAD(thread_checker_);
2431   ProducerEndpointImpl* producer = GetProducer(data_source.producer_id);
2432   PERFETTO_DCHECK(producer);
2433   // An existing producer that is not ftrace could have registered itself as
2434   // ftrace; we must not enable it in that case.
2435   if (lockdown_mode_ && producer->uid_ != uid_) {
2436     PERFETTO_DLOG("Lockdown mode: not enabling producer %hu", producer->id_);
2437     return nullptr;
2438   }
2439   // TODO(primiano): Add tests for registration ordering (data sources vs
2440   // consumers).
2441   if (!NameMatchesFilter(producer->name_,
2442                          cfg_data_source.producer_name_filter(),
2443                          cfg_data_source.producer_name_regex_filter())) {
2444     PERFETTO_DLOG("Data source: %s is filtered out for producer: %s",
2445                   cfg_data_source.config().name().c_str(),
2446                   producer->name_.c_str());
2447     return nullptr;
2448   }
2449 
2450   auto relative_buffer_id = cfg_data_source.config().target_buffer();
2451   if (relative_buffer_id >= tracing_session->num_buffers()) {
2452     PERFETTO_LOG(
2453         "The TraceConfig for DataSource %s specified a target_buffer out of "
2454         "bound (%d). Skipping it.",
2455         cfg_data_source.config().name().c_str(), relative_buffer_id);
2456     return nullptr;
2457   }
2458 
2459   // Create a copy of the DataSourceConfig specified in the trace config. This
2460   // will be passed to the producer after translating the |target_buffer| id.
2461   // The |target_buffer| parameter passed by the consumer in the trace config
2462   // is relative to the buffers declared in the same trace config. This has to
2463   // be translated to the global BufferID before passing it to the producers,
2464   // which don't know anything about tracing sessions and consumers.
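  // Illustrative example (hypothetical values): if the consumer's config
  // declares two buffers and this data source sets target_buffer = 1, the
  // service looks up buffers_index[1] and rewrites the config with the global
  // BufferID (e.g. 7) that the producer will then use when committing chunks.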
2465 
2466   DataSourceInstanceID inst_id = ++last_data_source_instance_id_;
2467   auto insert_iter = tracing_session->data_source_instances.emplace(
2468       std::piecewise_construct,  //
2469       std::forward_as_tuple(producer->id_),
2470       std::forward_as_tuple(
2471           inst_id,
2472           cfg_data_source.config(),  //  Deliberate copy.
2473           data_source.descriptor.name(),
2474           data_source.descriptor.will_notify_on_start(),
2475           data_source.descriptor.will_notify_on_stop(),
2476           data_source.descriptor.handles_incremental_state_clear()));
2477   DataSourceInstance* ds_instance = &insert_iter->second;
2478 
2479   // New data source instance starts out in CONFIGURED state.
2480   if (tracing_session->consumer_maybe_null) {
2481     tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
2482         *producer, *ds_instance);
2483   }
2484 
2485   DataSourceConfig& ds_config = ds_instance->config;
2486   ds_config.set_trace_duration_ms(tracing_session->config.duration_ms());
2487   ds_config.set_stop_timeout_ms(tracing_session->data_source_stop_timeout_ms());
2488   ds_config.set_enable_extra_guardrails(
2489       tracing_session->config.enable_extra_guardrails());
2490   if (tracing_session->consumer_uid == 1066 /* AID_STATSD */ &&
2491       tracing_session->config.statsd_metadata().triggering_config_uid() !=
2492           2000 /* AID_SHELL */
2493       && tracing_session->config.statsd_metadata().triggering_config_uid() !=
2494              0 /* AID_ROOT */) {
2495     // StatsD can be triggered either by shell, root or an app that has DUMP and
2496     // USAGE_STATS permission. When triggered by shell or root, we do not want
2497     // to consider the trace a trusted system trace, as it was initiated by the
2498     // user. Otherwise, it has to come from an app with DUMP and
2499     // PACKAGE_USAGE_STATS, which has to be preinstalled and trusted by the
2500     // system.
2501     // Check for shell / root: https://bit.ly/3b7oZNi
2502     // Check for DUMP or PACKAGE_USAGE_STATS: https://bit.ly/3ep0NrR
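    // In other words (illustrative): a statsd-initiated trace pulled on behalf
    // of a preinstalled app holding DUMP + PACKAGE_USAGE_STATS is marked as a
    // trusted system trace, while the same config triggered from shell
    // (AID_SHELL) or root (AID_ROOT) is not.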
2503     ds_config.set_session_initiator(
2504         DataSourceConfig::SESSION_INITIATOR_TRUSTED_SYSTEM);
2505   } else {
2506     // Unset in case the consumer set it.
2507     // We need to be able to trust this field.
2508     ds_config.set_session_initiator(
2509         DataSourceConfig::SESSION_INITIATOR_UNSPECIFIED);
2510   }
2511   ds_config.set_tracing_session_id(tracing_session->id);
2512   BufferID global_id = tracing_session->buffers_index[relative_buffer_id];
2513   PERFETTO_DCHECK(global_id);
2514   ds_config.set_target_buffer(global_id);
2515 
2516   PERFETTO_DLOG("Setting up data source %s with target buffer %" PRIu16,
2517                 ds_config.name().c_str(), global_id);
2518   if (!producer->shared_memory()) {
2519     // Determine the SMB page size. Must be an integer multiple of 4k.
2520     // As for the SMB size below, the decision tree is as follows:
2521     // 1. Give priority to what is defined in the trace config.
2522     // 2. If unset give priority to the hint passed by the producer.
2523     // 3. Keep within bounds and ensure it's a multiple of 4k.
2524     size_t page_size = producer_config.page_size_kb() * 1024;
2525     if (page_size == 0)
2526       page_size = producer->shmem_page_size_hint_bytes_;
2527 
2528     // Determine the SMB size. Must be an integer multiple of the SMB page size.
2529     // The decision tree is as follows:
2530     // 1. Give priority to what is defined in the trace config.
2531     // 2. If unset give priority to the hint passed by the producer.
2532     // 3. Keep within bounds and ensure it's a multiple of the page size.
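    // For example (hypothetical values): if the trace config leaves shm_size_kb
    // unset and the producer hinted 2 MB, shm_size becomes 2 MB. The
    // EnsureValidShmSizes() call below then validates the (shm_size, page_size)
    // pair and substitutes safe fallback values if the request is out of range
    // (see the "Falling back" log message below).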
2533     size_t shm_size = producer_config.shm_size_kb() * 1024;
2534     if (shm_size == 0)
2535       shm_size = producer->shmem_size_hint_bytes_;
2536 
2537     auto valid_sizes = EnsureValidShmSizes(shm_size, page_size);
2538     if (valid_sizes != std::tie(shm_size, page_size)) {
2539       PERFETTO_DLOG(
2540           "Invalid configured SMB sizes: shm_size %zu page_size %zu. Falling "
2541           "back to shm_size %zu page_size %zu.",
2542           shm_size, page_size, std::get<0>(valid_sizes),
2543           std::get<1>(valid_sizes));
2544     }
2545     std::tie(shm_size, page_size) = valid_sizes;
2546 
2547     // TODO(primiano): right now Create() will suicide in case of OOM if the
2548     // mmap fails. We should instead gracefully fail the request and tell the
2549     // client to go away.
2550     PERFETTO_DLOG("Creating SMB of %zu KB for producer \"%s\"", shm_size / 1024,
2551                   producer->name_.c_str());
2552     auto shared_memory = shm_factory_->CreateSharedMemory(shm_size);
2553     producer->SetupSharedMemory(std::move(shared_memory), page_size,
2554                                 /*provided_by_producer=*/false);
2555   }
2556   producer->SetupDataSource(inst_id, ds_config);
2557   return ds_instance;
2558 }
2559 
2560 // Note: all the fields except the *_trusted ones are untrusted, as in, the
2561 // Producer might be lying / returning garbage contents. |src| and |size| can
2562 // be trusted in terms of being a valid pointer, but not the contents.
2563 void TracingServiceImpl::CopyProducerPageIntoLogBuffer(
2564     ProducerID producer_id_trusted,
2565     uid_t producer_uid_trusted,
2566     WriterID writer_id,
2567     ChunkID chunk_id,
2568     BufferID buffer_id,
2569     uint16_t num_fragments,
2570     uint8_t chunk_flags,
2571     bool chunk_complete,
2572     const uint8_t* src,
2573     size_t size) {
2574   PERFETTO_DCHECK_THREAD(thread_checker_);
2575 
2576   ProducerEndpointImpl* producer = GetProducer(producer_id_trusted);
2577   if (!producer) {
2578     PERFETTO_DFATAL("Producer not found.");
2579     chunks_discarded_++;
2580     return;
2581   }
2582 
2583   TraceBuffer* buf = GetBufferByID(buffer_id);
2584   if (!buf) {
2585     PERFETTO_DLOG("Could not find target buffer %" PRIu16
2586                   " for producer %" PRIu16,
2587                   buffer_id, producer_id_trusted);
2588     chunks_discarded_++;
2589     return;
2590   }
2591 
2592   // Verify that the producer is actually allowed to write into the target
2593   // buffer specified in the request. This prevents a malicious producer from
2594   // injecting data into a log buffer that belongs to a tracing session the
2595   // producer is not part of.
2596   if (!producer->is_allowed_target_buffer(buffer_id)) {
2597     PERFETTO_ELOG("Producer %" PRIu16
2598                   " tried to write into forbidden target buffer %" PRIu16,
2599                   producer_id_trusted, buffer_id);
2600     PERFETTO_DFATAL("Forbidden target buffer");
2601     chunks_discarded_++;
2602     return;
2603   }
2604 
2605   // If the writer was registered by the producer, it should only write into the
2606   // buffer it was registered with.
2607   base::Optional<BufferID> associated_buffer =
2608       producer->buffer_id_for_writer(writer_id);
2609   if (associated_buffer && *associated_buffer != buffer_id) {
2610     PERFETTO_ELOG("Writer %" PRIu16 " of producer %" PRIu16
2611                   " was registered to write into target buffer %" PRIu16
2612                   ", but tried to write into buffer %" PRIu16,
2613                   writer_id, producer_id_trusted, *associated_buffer,
2614                   buffer_id);
2615     PERFETTO_DFATAL("Wrong target buffer");
2616     chunks_discarded_++;
2617     return;
2618   }
2619 
2620   buf->CopyChunkUntrusted(producer_id_trusted, producer_uid_trusted, writer_id,
2621                           chunk_id, num_fragments, chunk_flags, chunk_complete,
2622                           src, size);
2623 }
2624 
2625 void TracingServiceImpl::ApplyChunkPatches(
2626     ProducerID producer_id_trusted,
2627     const std::vector<CommitDataRequest::ChunkToPatch>& chunks_to_patch) {
2628   PERFETTO_DCHECK_THREAD(thread_checker_);
2629 
2630   for (const auto& chunk : chunks_to_patch) {
2631     const ChunkID chunk_id = static_cast<ChunkID>(chunk.chunk_id());
2632     const WriterID writer_id = static_cast<WriterID>(chunk.writer_id());
2633     TraceBuffer* buf =
2634         GetBufferByID(static_cast<BufferID>(chunk.target_buffer()));
2635     static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
2636                   "Add a '|| chunk_id > kMaxChunkID' below if this fails");
2637     if (!writer_id || writer_id > kMaxWriterID || !buf) {
2638       // This can genuinely happen when the trace is stopped. The producers
2639       // might see the stop signal with some delay and keep sending the
2640       // patches they have left shortly afterwards.
2641       PERFETTO_DLOG(
2642           "Received invalid chunks_to_patch request from Producer: %" PRIu16
2643           ", BufferID: %" PRIu32 " ChunkdID: %" PRIu32 " WriterID: %" PRIu16,
2644           producer_id_trusted, chunk.target_buffer(), chunk_id, writer_id);
2645       patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
2646       continue;
2647     }
2648 
2649     // Note, there's no need to validate that the producer is allowed to write
2650     // to the specified buffer ID (or that it's the correct buffer ID for a
2651     // registered TraceWriter). That's because TraceBuffer uses the producer ID
2652     // and writer ID to look up the chunk to patch. If the producer specifies an
2653     // incorrect buffer, this lookup will fail and TraceBuffer will ignore the
2654     // patches. Because the producer ID is trusted, there's also no way for a
2655     // malicious producer to patch another producer's data.
2656 
2657     // Speculate on the fact that there are going to be a limited number of
2658     // patches per request, so we can allocate the |patches| array on the stack.
2659     std::array<TraceBuffer::Patch, 1024> patches;  // Uninitialized.
2660     if (chunk.patches().size() > patches.size()) {
2661       PERFETTO_ELOG("Too many patches (%zu) batched in the same request",
2662                     patches.size());
2663       PERFETTO_DFATAL("Too many patches");
2664       patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
2665       continue;
2666     }
2667 
2668     size_t i = 0;
2669     for (const auto& patch : chunk.patches()) {
2670       const std::string& patch_data = patch.data();
2671       if (patch_data.size() != patches[i].data.size()) {
2672         PERFETTO_ELOG("Received patch from producer: %" PRIu16
2673                       " of unexpected size %zu",
2674                       producer_id_trusted, patch_data.size());
2675         patches_discarded_++;
2676         continue;
2677       }
2678       patches[i].offset_untrusted = patch.offset();
2679       memcpy(&patches[i].data[0], patch_data.data(), patches[i].data.size());
2680       i++;
2681     }
2682     buf->TryPatchChunkContents(producer_id_trusted, writer_id, chunk_id,
2683                                &patches[0], i, chunk.has_more_patches());
2684   }
2685 }
2686 
2687 TracingServiceImpl::TracingSession* TracingServiceImpl::GetDetachedSession(
2688     uid_t uid,
2689     const std::string& key) {
2690   PERFETTO_DCHECK_THREAD(thread_checker_);
2691   for (auto& kv : tracing_sessions_) {
2692     TracingSession* session = &kv.second;
2693     if (session->consumer_uid == uid && session->detach_key == key) {
2694       PERFETTO_DCHECK(session->consumer_maybe_null == nullptr);
2695       return session;
2696     }
2697   }
2698   return nullptr;
2699 }
2700 
2701 TracingServiceImpl::TracingSession* TracingServiceImpl::GetTracingSession(
2702     TracingSessionID tsid) {
2703   PERFETTO_DCHECK_THREAD(thread_checker_);
2704   auto it = tsid ? tracing_sessions_.find(tsid) : tracing_sessions_.end();
2705   if (it == tracing_sessions_.end())
2706     return nullptr;
2707   return &it->second;
2708 }
2709 
2710 ProducerID TracingServiceImpl::GetNextProducerID() {
2711   PERFETTO_DCHECK_THREAD(thread_checker_);
2712   PERFETTO_CHECK(producers_.size() < kMaxProducerID);
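  // Producer IDs may wrap around the 16-bit ID space over time; keep
  // incrementing until we land on an ID that is neither 0 nor still in use.
  // The CHECK above guarantees that at least one free ID exists.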
2713   do {
2714     ++last_producer_id_;
2715   } while (producers_.count(last_producer_id_) || last_producer_id_ == 0);
2716   PERFETTO_DCHECK(last_producer_id_ > 0 && last_producer_id_ <= kMaxProducerID);
2717   return last_producer_id_;
2718 }
2719 
2720 TraceBuffer* TracingServiceImpl::GetBufferByID(BufferID buffer_id) {
2721   auto buf_iter = buffers_.find(buffer_id);
2722   if (buf_iter == buffers_.end())
2723     return nullptr;
2724   return &*buf_iter->second;
2725 }
2726 
2727 void TracingServiceImpl::OnStartTriggersTimeout(TracingSessionID tsid) {
2728   // Skip entirely the flush if the trace session doesn't exist anymore.
2729   // This is to prevent misleading error messages to be logged.
2730   //
2731   // If the trace has started from a trigger, we rely on the trigger's
2732   // |stop_delay_ms| instead, so don't flush and disable if we've moved
2733   // beyond the CONFIGURED state.
2734   auto* tracing_session_ptr = GetTracingSession(tsid);
2735   if (tracing_session_ptr &&
2736       tracing_session_ptr->state == TracingSession::CONFIGURED) {
2737     PERFETTO_DLOG("Disabling TracingSession %" PRIu64
2738                   " since no triggers activated.",
2739                   tsid);
2740     // No data should be returned from ReadBuffers() regardless of whether we
2741     // call FreeBuffers() or DisableTracing(). This is because in
2742     // STOP_TRACING we need this promise in either case, and using
2743     // DisableTracing() allows a graceful shutdown. Consumers can follow
2744     // their normal path and check the buffers through ReadBuffers() and
2745     // the code won't hang because the tracing session will still be
2746     // alive, just disabled.
2747     DisableTracing(tsid);
2748   }
2749 }
2750 
2751 void TracingServiceImpl::UpdateMemoryGuardrail() {
2752 #if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
2753   uint64_t total_buffer_bytes = 0;
2754 
2755   // Sum up all the shared memory buffers.
2756   for (const auto& id_to_producer : producers_) {
2757     if (id_to_producer.second->shared_memory())
2758       total_buffer_bytes += id_to_producer.second->shared_memory()->size();
2759   }
2760 
2761   // Sum up all the trace buffers.
2762   for (const auto& id_to_buffer : buffers_) {
2763     total_buffer_bytes += id_to_buffer.second->size();
2764   }
2765 
2766   // Set the guard rail to 32MB + the sum of all the buffers over a 30 second
2767   // interval.
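  // Illustrative example: a 4 MB producer SMB plus a 64 MB trace buffer yields
  // a limit of roughly 32 MB (default slack) + 68 MB = 100 MB, checked by the
  // watchdog over 30 second windows.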
2768   uint64_t guardrail = base::kWatchdogDefaultMemorySlack + total_buffer_bytes;
2769   base::Watchdog::GetInstance()->SetMemoryLimit(guardrail, 30 * 1000);
2770 #endif
2771 }
2772 
2773 void TracingServiceImpl::PeriodicSnapshotTask(TracingSessionID tsid) {
2774   auto* tracing_session = GetTracingSession(tsid);
2775   if (!tracing_session)
2776     return;
2777   if (tracing_session->state != TracingSession::STARTED)
2778     return;
2779   tracing_session->should_emit_sync_marker = true;
2780   tracing_session->should_emit_stats = true;
2781   MaybeSnapshotClocksIntoRingBuffer(tracing_session);
2782 }
2783 
2784 void TracingServiceImpl::SnapshotLifecyleEvent(TracingSession* tracing_session,
2785                                                uint32_t field_id,
2786                                                bool snapshot_clocks) {
2787   // field_id should be an id of a field in TracingServiceEvent.
2788   auto& lifecycle_events = tracing_session->lifecycle_events;
2789   auto event_it =
2790       std::find_if(lifecycle_events.begin(), lifecycle_events.end(),
2791                    [field_id](const TracingSession::LifecycleEvent& event) {
2792                      return event.field_id == field_id;
2793                    });
2794 
2795   TracingSession::LifecycleEvent* event;
2796   if (event_it == lifecycle_events.end()) {
2797     lifecycle_events.emplace_back(field_id);
2798     event = &lifecycle_events.back();
2799   } else {
2800     event = &*event_it;
2801   }
2802 
2803   // Snapshot the clocks before capturing the timestamp for the event so we can
2804   // use this snapshot to resolve the event timestamp if necessary.
2805   if (snapshot_clocks)
2806     MaybeSnapshotClocksIntoRingBuffer(tracing_session);
2807 
2808   // Erase before emplacing to prevent an unnecessary doubling of memory if
2809   // not needed.
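  // For instance, with max_size == 4 and four timestamps already stored,
  // erase_front(1) drops the oldest entry so the emplace_back() below keeps
  // the buffer at its capacity.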
2810   if (event->timestamps.size() >= event->max_size) {
2811     event->timestamps.erase_front(1 + event->timestamps.size() -
2812                                   event->max_size);
2813   }
2814   event->timestamps.emplace_back(base::GetBootTimeNs().count());
2815 }
2816 
2817 void TracingServiceImpl::MaybeSnapshotClocksIntoRingBuffer(
2818     TracingSession* tracing_session) {
2819   if (tracing_session->config.builtin_data_sources()
2820           .disable_clock_snapshotting()) {
2821     return;
2822   }
2823 
2824   // We are making an explicit copy of the latest snapshot (if it exists)
2825   // because SnapshotClocks reads this data and computes the drift based on its
2826   // content. If the clock drift is high enough, it will update the contents of
2827   // |snapshot| and return true. Otherwise, it will return false.
2828   TracingSession::ClockSnapshotData snapshot =
2829       tracing_session->clock_snapshot_ring_buffer.empty()
2830           ? TracingSession::ClockSnapshotData()
2831           : tracing_session->clock_snapshot_ring_buffer.back();
2832   bool did_update = SnapshotClocks(&snapshot);
2833   if (did_update) {
2834     // This means clocks drifted enough since last snapshot. See the comment
2835     // in SnapshotClocks.
2836     auto* snapshot_buffer = &tracing_session->clock_snapshot_ring_buffer;
2837 
2838     // Erase before emplacing to prevent an unnecessary doubling of memory if
2839     // not needed.
2840     static constexpr uint32_t kClockSnapshotRingBufferSize = 16;
2841     if (snapshot_buffer->size() >= kClockSnapshotRingBufferSize) {
2842       snapshot_buffer->erase_front(1 + snapshot_buffer->size() -
2843                                    kClockSnapshotRingBufferSize);
2844     }
2845     snapshot_buffer->emplace_back(std::move(snapshot));
2846   }
2847 }
2848 
2849 // Returns true when the data in |snapshot_data| is updated with the new state
2850 // of the clocks and false otherwise.
2851 bool TracingServiceImpl::SnapshotClocks(
2852     TracingSession::ClockSnapshotData* snapshot_data) {
2853   // Minimum drift that justifies replacing a prior clock snapshot that hasn't
2854   // been emitted into the trace yet (see comment below).
2855   static constexpr int64_t kSignificantDriftNs = 10 * 1000 * 1000;  // 10 ms
2856 
2857   TracingSession::ClockSnapshotData new_snapshot_data;
2858 
2859 #if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
2860     !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&   \
2861     !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
2862   struct {
2863     clockid_t id;
2864     protos::pbzero::BuiltinClock type;
2865     struct timespec ts;
2866   } clocks[] = {
2867       {CLOCK_BOOTTIME, protos::pbzero::BUILTIN_CLOCK_BOOTTIME, {0, 0}},
2868       {CLOCK_REALTIME_COARSE,
2869        protos::pbzero::BUILTIN_CLOCK_REALTIME_COARSE,
2870        {0, 0}},
2871       {CLOCK_MONOTONIC_COARSE,
2872        protos::pbzero::BUILTIN_CLOCK_MONOTONIC_COARSE,
2873        {0, 0}},
2874       {CLOCK_REALTIME, protos::pbzero::BUILTIN_CLOCK_REALTIME, {0, 0}},
2875       {CLOCK_MONOTONIC, protos::pbzero::BUILTIN_CLOCK_MONOTONIC, {0, 0}},
2876       {CLOCK_MONOTONIC_RAW,
2877        protos::pbzero::BUILTIN_CLOCK_MONOTONIC_RAW,
2878        {0, 0}},
2879   };
2880   // First snapshot all the clocks as atomically as we can.
2881   for (auto& clock : clocks) {
2882     if (clock_gettime(clock.id, &clock.ts) == -1)
2883       PERFETTO_DLOG("clock_gettime failed for clock %d", clock.id);
2884   }
2885   for (auto& clock : clocks) {
2886     new_snapshot_data.push_back(std::make_pair(
2887         static_cast<uint32_t>(clock.type),
2888         static_cast<uint64_t>(base::FromPosixTimespec(clock.ts).count())));
2889   }
2890 #else  // OS_APPLE || OS_WIN || OS_NACL
2891   auto wall_time_ns = static_cast<uint64_t>(base::GetWallTimeNs().count());
2892   // The default trace clock is boot time, so we always need to emit a path to
2893   // it. However since we don't actually have a boot time source on these
2894   // platforms, pretend that wall time equals boot time.
2895   new_snapshot_data.push_back(
2896       std::make_pair(protos::pbzero::BUILTIN_CLOCK_BOOTTIME, wall_time_ns));
2897   new_snapshot_data.push_back(
2898       std::make_pair(protos::pbzero::BUILTIN_CLOCK_MONOTONIC, wall_time_ns));
2899 #endif
2900 
2901   // If we're about to update a session's latest clock snapshot that hasn't been
2902   // emitted into the trace yet, check whether the clocks have drifted enough to
2903   // warrant overriding the current snapshot values. The older snapshot would be
2904   // valid for a larger part of the currently buffered trace data because the
2905   // clock sync protocol in trace processor uses the latest clock <= timestamp
2906   // to translate times (see https://perfetto.dev/docs/concepts/clock-sync), so
2907   // we try to keep it if we can.
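  // Worked example (hypothetical values): if BOOTTIME advanced by 100 ms since
  // the previous snapshot but REALTIME advanced by 120 ms, the relative drift
  // is |100 - 120| ms = 20 ms >= kSignificantDriftNs (10 ms), so the stale
  // snapshot is replaced below.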
2908   if (!snapshot_data->empty()) {
2909     PERFETTO_DCHECK(snapshot_data->size() == new_snapshot_data.size());
2910     PERFETTO_DCHECK((*snapshot_data)[0].first ==
2911                     protos::gen::BUILTIN_CLOCK_BOOTTIME);
2912 
2913     bool update_snapshot = false;
2914     uint64_t old_boot_ns = (*snapshot_data)[0].second;
2915     uint64_t new_boot_ns = new_snapshot_data[0].second;
2916     int64_t boot_diff =
2917         static_cast<int64_t>(new_boot_ns) - static_cast<int64_t>(old_boot_ns);
2918 
2919     for (size_t i = 1; i < snapshot_data->size(); i++) {
2920       uint64_t old_ns = (*snapshot_data)[i].second;
2921       uint64_t new_ns = new_snapshot_data[i].second;
2922 
2923       int64_t diff =
2924           static_cast<int64_t>(new_ns) - static_cast<int64_t>(old_ns);
2925 
2926       // Compare the boottime delta against the delta of this clock.
2927       if (std::abs(boot_diff - diff) >= kSignificantDriftNs) {
2928         update_snapshot = true;
2929         break;
2930       }
2931     }
2932     if (!update_snapshot)
2933       return false;
2934     snapshot_data->clear();
2935   }
2936 
2937   *snapshot_data = std::move(new_snapshot_data);
2938   return true;
2939 }
2940 
2941 void TracingServiceImpl::EmitClockSnapshot(
2942     TracingSession* tracing_session,
2943     TracingSession::ClockSnapshotData snapshot_data,
2944     std::vector<TracePacket>* packets) {
2945   PERFETTO_DCHECK(!tracing_session->config.builtin_data_sources()
2946                        .disable_clock_snapshotting());
2947 
2948   protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
2949   auto* snapshot = packet->set_clock_snapshot();
2950 
2951   protos::gen::BuiltinClock trace_clock =
2952       tracing_session->config.builtin_data_sources().primary_trace_clock();
2953   if (!trace_clock)
2954     trace_clock = protos::gen::BUILTIN_CLOCK_BOOTTIME;
2955   snapshot->set_primary_trace_clock(
2956       static_cast<protos::pbzero::BuiltinClock>(trace_clock));
2957 
2958   for (auto& clock_id_and_ts : snapshot_data) {
2959     auto* c = snapshot->add_clocks();
2960     c->set_clock_id(clock_id_and_ts.first);
2961     c->set_timestamp(clock_id_and_ts.second);
2962   }
2963 
2964   packet->set_trusted_uid(static_cast<int32_t>(uid_));
2965   packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
2966   SerializeAndAppendPacket(packets, packet.SerializeAsArray());
2967 }
2968 
2969 void TracingServiceImpl::EmitSyncMarker(std::vector<TracePacket>* packets) {
2970   // The sync marks are used to tokenize large traces efficiently.
2971   // See description in trace_packet.proto.
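  // In practice a reader can scan for the raw kSyncMarker byte sequence to
  // locate packet boundaries in the middle of a large trace without parsing it
  // from the beginning.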
2972   if (sync_marker_packet_size_ == 0) {
2973     // The marker ABI expects that the marker is written after the uid.
2974     // Protozero guarantees that fields are written in the same order of the
2975     // calls. The ResynchronizeTraceStreamUsingSyncMarker test verifies the ABI.
2976     protozero::StaticBuffered<protos::pbzero::TracePacket> packet(
2977         &sync_marker_packet_[0], sizeof(sync_marker_packet_));
2978     packet->set_trusted_uid(static_cast<int32_t>(uid_));
2979     packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
2980 
2981     // Keep this last.
2982     packet->set_synchronization_marker(kSyncMarker, sizeof(kSyncMarker));
2983     sync_marker_packet_size_ = packet.Finalize();
2984   }
2985   packets->emplace_back();
2986   packets->back().AddSlice(&sync_marker_packet_[0], sync_marker_packet_size_);
2987 }
2988 
2989 void TracingServiceImpl::EmitStats(TracingSession* tracing_session,
2990                                    std::vector<TracePacket>* packets) {
2991   protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
2992   packet->set_trusted_uid(static_cast<int32_t>(uid_));
2993   packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
2994   GetTraceStats(tracing_session).Serialize(packet->set_trace_stats());
2995   SerializeAndAppendPacket(packets, packet.SerializeAsArray());
2996 }
2997 
2998 TraceStats TracingServiceImpl::GetTraceStats(TracingSession* tracing_session) {
2999   TraceStats trace_stats;
3000   trace_stats.set_producers_connected(static_cast<uint32_t>(producers_.size()));
3001   trace_stats.set_producers_seen(last_producer_id_);
3002   trace_stats.set_data_sources_registered(
3003       static_cast<uint32_t>(data_sources_.size()));
3004   trace_stats.set_data_sources_seen(last_data_source_instance_id_);
3005   trace_stats.set_tracing_sessions(
3006       static_cast<uint32_t>(tracing_sessions_.size()));
3007   trace_stats.set_total_buffers(static_cast<uint32_t>(buffers_.size()));
3008   trace_stats.set_chunks_discarded(chunks_discarded_);
3009   trace_stats.set_patches_discarded(patches_discarded_);
3010   trace_stats.set_invalid_packets(tracing_session->invalid_packets);
3011 
3012   if (tracing_session->trace_filter) {
3013     auto* filt_stats = trace_stats.mutable_filter_stats();
3014     filt_stats->set_input_packets(tracing_session->filter_input_packets);
3015     filt_stats->set_input_bytes(tracing_session->filter_input_bytes);
3016     filt_stats->set_output_bytes(tracing_session->filter_output_bytes);
3017     filt_stats->set_errors(tracing_session->filter_errors);
3018   }
3019 
3020   for (BufferID buf_id : tracing_session->buffers_index) {
3021     TraceBuffer* buf = GetBufferByID(buf_id);
3022     if (!buf) {
3023       PERFETTO_DFATAL("Buffer not found.");
3024       continue;
3025     }
3026     *trace_stats.add_buffer_stats() = buf->stats();
3027   }  // for (buf in session).
3028   return trace_stats;
3029 }
3030 
3031 void TracingServiceImpl::MaybeEmitTraceConfig(
3032     TracingSession* tracing_session,
3033     std::vector<TracePacket>* packets) {
3034   if (tracing_session->did_emit_config)
3035     return;
3036   tracing_session->did_emit_config = true;
3037   protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
3038   packet->set_trusted_uid(static_cast<int32_t>(uid_));
3039   packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
3040   tracing_session->config.Serialize(packet->set_trace_config());
3041   SerializeAndAppendPacket(packets, packet.SerializeAsArray());
3042 }
3043 
3044 void TracingServiceImpl::MaybeEmitSystemInfo(
3045     TracingSession* tracing_session,
3046     std::vector<TracePacket>* packets) {
3047   if (tracing_session->did_emit_system_info)
3048     return;
3049   tracing_session->did_emit_system_info = true;
3050   protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
3051   auto* info = packet->set_system_info();
3052   info->set_tracing_service_version(base::GetVersionString());
3053 #if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
3054     !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
3055   struct utsname uname_info;
3056   if (uname(&uname_info) == 0) {
3057     auto* utsname_info = info->set_utsname();
3058     utsname_info->set_sysname(uname_info.sysname);
3059     utsname_info->set_version(uname_info.version);
3060     utsname_info->set_machine(uname_info.machine);
3061     utsname_info->set_release(uname_info.release);
3062   }
3063 #endif  // !PERFETTO_OS_WIN && !PERFETTO_OS_NACL
3064 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
3065   char value[PROP_VALUE_MAX];
3066   if (__system_property_get("ro.build.fingerprint", value)) {
3067     info->set_android_build_fingerprint(value);
3068   } else {
3069     PERFETTO_ELOG("Unable to read ro.build.fingerprint");
3070   }
3071   info->set_hz(sysconf(_SC_CLK_TCK));
3072 #endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
3073   packet->set_trusted_uid(static_cast<int32_t>(uid_));
3074   packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
3075   SerializeAndAppendPacket(packets, packet.SerializeAsArray());
3076 }
3077 
3078 void TracingServiceImpl::EmitLifecycleEvents(
3079     TracingSession* tracing_session,
3080     std::vector<TracePacket>* packets) {
3081   using TimestampedPacket =
3082       std::pair<int64_t /* ts */, std::vector<uint8_t> /* serialized packet */>;
3083 
3084   std::vector<TimestampedPacket> timestamped_packets;
3085   for (auto& event : tracing_session->lifecycle_events) {
3086     for (int64_t ts : event.timestamps) {
3087       protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
3088       packet->set_timestamp(static_cast<uint64_t>(ts));
3089       packet->set_trusted_uid(static_cast<int32_t>(uid_));
3090       packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
3091 
3092       auto* service_event = packet->set_service_event();
3093       service_event->AppendVarInt(event.field_id, 1);
3094       timestamped_packets.emplace_back(ts, packet.SerializeAsArray());
3095     }
3096     event.timestamps.clear();
3097   }
3098 
3099   // We sort by timestamp here to ensure that the "sequence" of lifecycle
3100   // packets has monotonic timestamps like other sequences in the trace.
3101   // Note that these events could still be out of order with respect to other
3102   // events on the service packet sequence (e.g. trigger received packets).
3103   std::sort(timestamped_packets.begin(), timestamped_packets.end(),
3104             [](const TimestampedPacket& a, const TimestampedPacket& b) {
3105               return a.first < b.first;
3106             });
3107 
3108   for (const auto& pair : timestamped_packets)
3109     SerializeAndAppendPacket(packets, std::move(pair.second));
3110 }
3111 
3112 void TracingServiceImpl::EmitSeizedForBugreportLifecycleEvent(
3113     std::vector<TracePacket>* packets) {
3114   protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
3115   packet->set_timestamp(static_cast<uint64_t>(base::GetBootTimeNs().count()));
3116   packet->set_trusted_uid(static_cast<int32_t>(uid_));
3117   packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
3118   auto* service_event = packet->set_service_event();
3119   service_event->AppendVarInt(
3120       protos::pbzero::TracingServiceEvent::kSeizedForBugreportFieldNumber, 1);
3121   SerializeAndAppendPacket(packets, packet.SerializeAsArray());
3122 }
3123 
3124 void TracingServiceImpl::MaybeEmitReceivedTriggers(
3125     TracingSession* tracing_session,
3126     std::vector<TracePacket>* packets) {
3127   PERFETTO_DCHECK(tracing_session->num_triggers_emitted_into_trace <=
3128                   tracing_session->received_triggers.size());
3129   for (size_t i = tracing_session->num_triggers_emitted_into_trace;
3130        i < tracing_session->received_triggers.size(); ++i) {
3131     const auto& info = tracing_session->received_triggers[i];
3132     protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
3133     auto* trigger = packet->set_trigger();
3134     trigger->set_trigger_name(info.trigger_name);
3135     trigger->set_producer_name(info.producer_name);
3136     trigger->set_trusted_producer_uid(static_cast<int32_t>(info.producer_uid));
3137 
3138     packet->set_timestamp(info.boot_time_ns);
3139     packet->set_trusted_uid(static_cast<int32_t>(uid_));
3140     packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
3141     SerializeAndAppendPacket(packets, packet.SerializeAsArray());
3142     ++tracing_session->num_triggers_emitted_into_trace;
3143   }
3144 }
3145 
3146 bool TracingServiceImpl::MaybeSaveTraceForBugreport(
3147     std::function<void()> callback) {
3148   TracingSession* max_session = nullptr;
3149   TracingSessionID max_tsid = 0;
3150   for (auto& session_id_and_session : tracing_sessions_) {
3151     auto& session = session_id_and_session.second;
3152     const int32_t score = session.config.bugreport_score();
3153     // Exclude sessions with 0 (or below) score. By default tracing sessions
3154     // should NOT be eligible to be attached to bugreports.
3155     if (score <= 0 || session.state != TracingSession::STARTED)
3156       continue;
3157 
3158     // Also don't try to steal long traces with write_into_file if their content
3159     // has already been partially written into a file, as we would get partial
3160     // traces on both sides. We can't just copy the original file into the
3161     // bugreport because the file could be too big (GBs) for bugreports.
3162     // The only case where it's legit to steal traces with write_into_file, is
3163     // when the consumer specified a very large write_period_ms (e.g. 24h),
3164     // meaning that this is effectively a ring-buffer trace. Traceur (the
3165     // Android System Tracing app), which uses --detach, does this to have a
3166     // consistent invocation path for long-traces and ring-buffer-mode traces.
3167     if (session.write_into_file && session.bytes_written_into_file > 0)
3168       continue;
3169 
3170     // If we are already in the process of finalizing another trace for
3171     // bugreport, don't even start another one, as they would try to write onto
3172     // the same file.
3173     if (session.on_disable_callback_for_bugreport)
3174       return false;
3175 
3176     if (!max_session || score > max_session->config.bugreport_score()) {
3177       max_session = &session;
3178       max_tsid = session_id_and_session.first;
3179     }
3180   }
3181 
3182   // No eligible trace found.
3183   if (!max_session)
3184     return false;
3185 
3186   PERFETTO_LOG("Seizing trace for bugreport. tsid:%" PRIu64
3187                " state:%d wf:%d score:%d name:\"%s\"",
3188                max_tsid, max_session->state, !!max_session->write_into_file,
3189                max_session->config.bugreport_score(),
3190                max_session->config.unique_session_name().c_str());
3191 
3192   auto br_fd = CreateTraceFile(GetBugreportTmpPath(), /*overwrite=*/true);
3193   if (!br_fd)
3194     return false;
3195 
3196   if (max_session->write_into_file) {
3197     auto fd = *max_session->write_into_file;
3198     // If we are stealing a write_into_file session, add a marker that explains
3199     // why the trace has been stolen rather than creating an empty file. This is
3200     // only for write_into_file traces. A similar code path deals with the case
3201     // of reading-back a seized trace from IPC in ReadBuffers().
3202     if (!max_session->config.builtin_data_sources().disable_service_events()) {
3203       std::vector<TracePacket> packets;
3204       EmitSeizedForBugreportLifecycleEvent(&packets);
3205       for (auto& packet : packets) {
3206         char* preamble;
3207         size_t preamble_size = 0;
3208         std::tie(preamble, preamble_size) = packet.GetProtoPreamble();
3209         base::WriteAll(fd, preamble, preamble_size);
3210         for (const Slice& slice : packet.slices()) {
3211           base::WriteAll(fd, slice.start, slice.size);
3212         }
3213       }  // for (packets)
3214     }    // if (!disable_service_events())
3215   }      // if (max_session->write_into_file)
3216   max_session->write_into_file = std::move(br_fd);
3217   max_session->on_disable_callback_for_bugreport = std::move(callback);
3218   max_session->seized_for_bugreport = true;
3219 
3220   // Post a task so that early FlushAndDisableTracing() failures don't invoke
3221   // the callback before we return. That would re-enter, in a weird way, the
3222   // callstack of the calling ConsumerEndpointImpl::SaveTraceForBugreport().
3223   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3224   task_runner_->PostTask([weak_this, max_tsid] {
3225     if (weak_this)
3226       weak_this->FlushAndDisableTracing(max_tsid);
3227   });
3228   return true;
3229 }
3230 
3231 void TracingServiceImpl::MaybeLogUploadEvent(const TraceConfig& cfg,
3232                                              PerfettoStatsdAtom atom,
3233                                              const std::string& trigger_name) {
3234   if (!ShouldLogEvent(cfg))
3235     return;
3236 
3237   // If the UUID is not set for some reason, don't log anything.
3238   if (cfg.trace_uuid_lsb() == 0 && cfg.trace_uuid_msb() == 0)
3239     return;
3240 
3241   android_stats::MaybeLogUploadEvent(atom, cfg.trace_uuid_lsb(),
3242                                      cfg.trace_uuid_msb(), trigger_name);
3243 }
3244 
3245 void TracingServiceImpl::MaybeLogTriggerEvent(const TraceConfig& cfg,
3246                                               PerfettoTriggerAtom atom,
3247                                               const std::string& trigger_name) {
3248   if (!ShouldLogEvent(cfg))
3249     return;
3250   android_stats::MaybeLogTriggerEvent(atom, trigger_name);
3251 }
3252 
3253 size_t TracingServiceImpl::PurgeExpiredAndCountTriggerInWindow(
3254     int64_t now_ns,
3255     uint64_t trigger_name_hash) {
3256   PERFETTO_DCHECK(
3257       std::is_sorted(trigger_history_.begin(), trigger_history_.end()));
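  // |trigger_history_| is sorted by timestamp (see the DCHECK above), so all
  // expired entries form a prefix that can be dropped with the single
  // erase_front() call below.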
3258   size_t remove_count = 0;
3259   size_t trigger_count = 0;
3260   for (const TriggerHistory& h : trigger_history_) {
3261     if (h.timestamp_ns < now_ns - trigger_window_ns_) {
3262       remove_count++;
3263     } else if (h.name_hash == trigger_name_hash) {
3264       trigger_count++;
3265     }
3266   }
3267   trigger_history_.erase_front(remove_count);
3268   return trigger_count;
3269 }
3270 
3271 ////////////////////////////////////////////////////////////////////////////////
3272 // TracingServiceImpl::ConsumerEndpointImpl implementation
3273 ////////////////////////////////////////////////////////////////////////////////
3274 
3275 TracingServiceImpl::ConsumerEndpointImpl::ConsumerEndpointImpl(
3276     TracingServiceImpl* service,
3277     base::TaskRunner* task_runner,
3278     Consumer* consumer,
3279     uid_t uid)
3280     : task_runner_(task_runner),
3281       service_(service),
3282       consumer_(consumer),
3283       uid_(uid),
3284       weak_ptr_factory_(this) {}
3285 
3286 TracingServiceImpl::ConsumerEndpointImpl::~ConsumerEndpointImpl() {
3287   service_->DisconnectConsumer(this);
3288   consumer_->OnDisconnect();
3289 }
3290 
3291 void TracingServiceImpl::ConsumerEndpointImpl::NotifyOnTracingDisabled(
3292     const std::string& error) {
3293   PERFETTO_DCHECK_THREAD(thread_checker_);
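  // The endpoint (and its consumer) may be destroyed before the posted task
  // runs; routing through the WeakPtr makes the task a no-op in that case
  // rather than dereferencing a dangling |this|.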
3294   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3295   task_runner_->PostTask([weak_this, error /* deliberate copy */] {
3296     if (weak_this)
3297       weak_this->consumer_->OnTracingDisabled(error);
3298   });
3299 }
3300 
3301 void TracingServiceImpl::ConsumerEndpointImpl::EnableTracing(
3302     const TraceConfig& cfg,
3303     base::ScopedFile fd) {
3304   PERFETTO_DCHECK_THREAD(thread_checker_);
3305   auto status = service_->EnableTracing(this, cfg, std::move(fd));
3306   if (!status.ok())
3307     NotifyOnTracingDisabled(status.message());
3308 }
3309 
3310 void TracingServiceImpl::ConsumerEndpointImpl::ChangeTraceConfig(
3311     const TraceConfig& cfg) {
3312   if (!tracing_session_id_) {
3313     PERFETTO_LOG(
3314         "Consumer called ChangeTraceConfig() but tracing was "
3315         "not active");
3316     return;
3317   }
3318   service_->ChangeTraceConfig(this, cfg);
3319 }
3320 
3321 void TracingServiceImpl::ConsumerEndpointImpl::StartTracing() {
3322   PERFETTO_DCHECK_THREAD(thread_checker_);
3323   if (!tracing_session_id_) {
3324     PERFETTO_LOG("Consumer called StartTracing() but tracing was not active");
3325     return;
3326   }
3327   service_->StartTracing(tracing_session_id_);
3328 }
3329 
3330 void TracingServiceImpl::ConsumerEndpointImpl::DisableTracing() {
3331   PERFETTO_DCHECK_THREAD(thread_checker_);
3332   if (!tracing_session_id_) {
3333     PERFETTO_LOG("Consumer called DisableTracing() but tracing was not active");
3334     return;
3335   }
3336   service_->DisableTracing(tracing_session_id_);
3337 }
3338 
3339 void TracingServiceImpl::ConsumerEndpointImpl::ReadBuffers() {
3340   PERFETTO_DCHECK_THREAD(thread_checker_);
3341   if (!tracing_session_id_) {
3342     PERFETTO_LOG("Consumer called ReadBuffers() but tracing was not active");
3343     consumer_->OnTraceData({}, /* has_more = */ false);
3344     return;
3345   }
3346   if (!service_->ReadBuffers(tracing_session_id_, this)) {
3347     consumer_->OnTraceData({}, /* has_more = */ false);
3348   }
3349 }
3350 
3351 void TracingServiceImpl::ConsumerEndpointImpl::FreeBuffers() {
3352   PERFETTO_DCHECK_THREAD(thread_checker_);
3353   if (!tracing_session_id_) {
3354     PERFETTO_LOG("Consumer called FreeBuffers() but tracing was not active");
3355     return;
3356   }
3357   service_->FreeBuffers(tracing_session_id_);
3358   tracing_session_id_ = 0;
3359 }
3360 
3361 void TracingServiceImpl::ConsumerEndpointImpl::Flush(uint32_t timeout_ms,
3362                                                      FlushCallback callback) {
3363   PERFETTO_DCHECK_THREAD(thread_checker_);
3364   if (!tracing_session_id_) {
3365     PERFETTO_LOG("Consumer called Flush() but tracing was not active");
3366     return;
3367   }
3368   service_->Flush(tracing_session_id_, timeout_ms, callback);
3369 }
3370 
3371 void TracingServiceImpl::ConsumerEndpointImpl::Detach(const std::string& key) {
3372   PERFETTO_DCHECK_THREAD(thread_checker_);
3373   bool success = service_->DetachConsumer(this, key);
3374   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3375   task_runner_->PostTask([weak_this, success] {
3376     if (weak_this)
3377       weak_this->consumer_->OnDetach(success);
3378   });
3379 }
3380 
3381 void TracingServiceImpl::ConsumerEndpointImpl::Attach(const std::string& key) {
3382   PERFETTO_DCHECK_THREAD(thread_checker_);
3383   bool success = service_->AttachConsumer(this, key);
3384   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3385   task_runner_->PostTask([weak_this, success] {
3386     if (!weak_this)
3387       return;
3388     Consumer* consumer = weak_this->consumer_;
3389     TracingSession* session =
3390         weak_this->service_->GetTracingSession(weak_this->tracing_session_id_);
3391     if (!session) {
3392       consumer->OnAttach(false, TraceConfig());
3393       return;
3394     }
3395     consumer->OnAttach(success, session->config);
3396   });
3397 }
3398 
3399 void TracingServiceImpl::ConsumerEndpointImpl::GetTraceStats() {
3400   PERFETTO_DCHECK_THREAD(thread_checker_);
3401   bool success = false;
3402   TraceStats stats;
3403   TracingSession* session = service_->GetTracingSession(tracing_session_id_);
3404   if (session) {
3405     success = true;
3406     stats = service_->GetTraceStats(session);
3407   }
3408   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3409   task_runner_->PostTask([weak_this, success, stats] {
3410     if (weak_this)
3411       weak_this->consumer_->OnTraceStats(success, stats);
3412   });
3413 }
3414 
3415 void TracingServiceImpl::ConsumerEndpointImpl::ObserveEvents(
3416     uint32_t events_mask) {
3417   PERFETTO_DCHECK_THREAD(thread_checker_);
3418   observable_events_mask_ = events_mask;
3419   TracingSession* session = service_->GetTracingSession(tracing_session_id_);
3420   if (!session)
3421     return;
3422 
3423   if (observable_events_mask_ & ObservableEvents::TYPE_DATA_SOURCES_INSTANCES) {
3424     // Issue initial states.
3425     for (const auto& kv : session->data_source_instances) {
3426       ProducerEndpointImpl* producer = service_->GetProducer(kv.first);
3427       PERFETTO_DCHECK(producer);
3428       OnDataSourceInstanceStateChange(*producer, kv.second);
3429     }
3430   }
3431 
3432   // If the ObserveEvents() call happens after data sources have already
3433   // acked, notify immediately.
3434   if (observable_events_mask_ &
3435       ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED) {
3436     service_->MaybeNotifyAllDataSourcesStarted(session);
3437   }
3438 }
3439 
3440 void TracingServiceImpl::ConsumerEndpointImpl::OnDataSourceInstanceStateChange(
3441     const ProducerEndpointImpl& producer,
3442     const DataSourceInstance& instance) {
3443   if (!(observable_events_mask_ &
3444         ObservableEvents::TYPE_DATA_SOURCES_INSTANCES)) {
3445     return;
3446   }
3447 
3448   if (instance.state != DataSourceInstance::CONFIGURED &&
3449       instance.state != DataSourceInstance::STARTED &&
3450       instance.state != DataSourceInstance::STOPPED) {
3451     return;
3452   }
3453 
3454   auto* observable_events = AddObservableEvents();
3455   auto* change = observable_events->add_instance_state_changes();
3456   change->set_producer_name(producer.name_);
3457   change->set_data_source_name(instance.data_source_name);
3458   if (instance.state == DataSourceInstance::STARTED) {
3459     change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED);
3460   } else {
3461     change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STOPPED);
3462   }
3463 }
3464 
3465 void TracingServiceImpl::ConsumerEndpointImpl::OnAllDataSourcesStarted() {
3466   if (!(observable_events_mask_ &
3467         ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED)) {
3468     return;
3469   }
3470   auto* observable_events = AddObservableEvents();
3471   observable_events->set_all_data_sources_started(true);
3472 }
3473 
3474 ObservableEvents*
3475 TracingServiceImpl::ConsumerEndpointImpl::AddObservableEvents() {
3476   PERFETTO_DCHECK_THREAD(thread_checker_);
3477   if (!observable_events_) {
3478     observable_events_.reset(new ObservableEvents());
3479     auto weak_this = weak_ptr_factory_.GetWeakPtr();
3480     task_runner_->PostTask([weak_this] {
3481       if (!weak_this)
3482         return;
3483 
3484       // Move into a temporary to allow reentrancy in OnObservableEvents.
3485       auto observable_events = std::move(weak_this->observable_events_);
3486       weak_this->consumer_->OnObservableEvents(*observable_events);
3487     });
3488   }
3489   return observable_events_.get();
3490 }
3491 
3492 void TracingServiceImpl::ConsumerEndpointImpl::QueryServiceState(
3493     QueryServiceStateCallback callback) {
3494   PERFETTO_DCHECK_THREAD(thread_checker_);
3495   TracingServiceState svc_state;
3496 
3497   const auto& sessions = service_->tracing_sessions_;
3498   svc_state.set_tracing_service_version(base::GetVersionString());
3499   svc_state.set_num_sessions(static_cast<int>(sessions.size()));
3500 
3501   int num_started = 0;
3502   for (const auto& kv : sessions)
3503     num_started += kv.second.state == TracingSession::State::STARTED ? 1 : 0;
3504   svc_state.set_num_sessions_started(static_cast<int>(num_started));
3505 
3506   for (const auto& kv : service_->producers_) {
3507     auto* producer = svc_state.add_producers();
3508     producer->set_id(static_cast<int>(kv.first));
3509     producer->set_name(kv.second->name_);
3510     producer->set_sdk_version(kv.second->sdk_version_);
3511     producer->set_uid(static_cast<int32_t>(kv.second->uid_));
3512   }
3513 
3514   for (const auto& kv : service_->data_sources_) {
3515     const auto& registered_data_source = kv.second;
3516     auto* data_source = svc_state.add_data_sources();
3517     *data_source->mutable_ds_descriptor() = registered_data_source.descriptor;
3518     data_source->set_producer_id(
3519         static_cast<int>(registered_data_source.producer_id));
3520   }
3521   callback(/*success=*/true, svc_state);
3522 }
3523 
3524 void TracingServiceImpl::ConsumerEndpointImpl::QueryCapabilities(
3525     QueryCapabilitiesCallback callback) {
3526   PERFETTO_DCHECK_THREAD(thread_checker_);
3527   TracingServiceCapabilities caps;
3528   caps.set_has_query_capabilities(true);
3529   caps.set_has_trace_config_output_path(true);
3530   caps.add_observable_events(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES);
3531   caps.add_observable_events(ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED);
3532   static_assert(ObservableEvents::Type_MAX ==
3533                     ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED,
3534                 "");
3535   callback(caps);
3536 }
3537 
3538 void TracingServiceImpl::ConsumerEndpointImpl::SaveTraceForBugreport(
3539     SaveTraceForBugreportCallback consumer_callback) {
3540   PERFETTO_DCHECK_THREAD(thread_checker_);
3541   auto on_complete_callback = [consumer_callback] {
3542     if (rename(GetBugreportTmpPath().c_str(), GetBugreportPath().c_str())) {
3543       consumer_callback(false, "rename(" + GetBugreportTmpPath() + ", " +
3544                                    GetBugreportPath() + ") failed (" +
3545                                    strerror(errno) + ")");
3546     } else {
3547       consumer_callback(true, GetBugreportPath());
3548     }
3549   };
3550   if (!service_->MaybeSaveTraceForBugreport(std::move(on_complete_callback))) {
3551     consumer_callback(false,
3552                       "No trace with TraceConfig.bugreport_score > 0 eligible "
3553                       "for bug reporting was found");
3554   }
3555 }
3556 
3557 ////////////////////////////////////////////////////////////////////////////////
3558 // TracingServiceImpl::ProducerEndpointImpl implementation
3559 ////////////////////////////////////////////////////////////////////////////////
3560 
3561 TracingServiceImpl::ProducerEndpointImpl::ProducerEndpointImpl(
3562     ProducerID id,
3563     uid_t uid,
3564     TracingServiceImpl* service,
3565     base::TaskRunner* task_runner,
3566     Producer* producer,
3567     const std::string& producer_name,
3568     const std::string& sdk_version,
3569     bool in_process,
3570     bool smb_scraping_enabled)
3571     : id_(id),
3572       uid_(uid),
3573       service_(service),
3574       task_runner_(task_runner),
3575       producer_(producer),
3576       name_(producer_name),
3577       sdk_version_(sdk_version),
3578       in_process_(in_process),
3579       smb_scraping_enabled_(smb_scraping_enabled),
3580       weak_ptr_factory_(this) {}
3581 
3582 TracingServiceImpl::ProducerEndpointImpl::~ProducerEndpointImpl() {
3583   service_->DisconnectProducer(id_);
3584   producer_->OnDisconnect();
3585 }
3586 
3587 void TracingServiceImpl::ProducerEndpointImpl::RegisterDataSource(
3588     const DataSourceDescriptor& desc) {
3589   PERFETTO_DCHECK_THREAD(thread_checker_);
3590   if (desc.name().empty()) {
3591     PERFETTO_DLOG("Received RegisterDataSource() with empty name");
3592     return;
3593   }
3594 
3595   service_->RegisterDataSource(id_, desc);
3596 }
3597 
3598 void TracingServiceImpl::ProducerEndpointImpl::UnregisterDataSource(
3599     const std::string& name) {
3600   PERFETTO_DCHECK_THREAD(thread_checker_);
3601   service_->UnregisterDataSource(id_, name);
3602 }
3603 
3604 void TracingServiceImpl::ProducerEndpointImpl::RegisterTraceWriter(
3605     uint32_t writer_id,
3606     uint32_t target_buffer) {
3607   PERFETTO_DCHECK_THREAD(thread_checker_);
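  // Note: track which buffer this writer targets; the service uses this
  // mapping elsewhere (e.g. when scraping the producer's shared memory
  // buffer) to route a writer's chunks to the right trace buffer.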
3608   writers_[static_cast<WriterID>(writer_id)] =
3609       static_cast<BufferID>(target_buffer);
3610 }
3611 
3612 void TracingServiceImpl::ProducerEndpointImpl::UnregisterTraceWriter(
3613     uint32_t writer_id) {
3614   PERFETTO_DCHECK_THREAD(thread_checker_);
3615   writers_.erase(static_cast<WriterID>(writer_id));
3616 }
3617 
3618 void TracingServiceImpl::ProducerEndpointImpl::CommitData(
3619     const CommitDataRequest& req_untrusted,
3620     CommitDataCallback callback) {
3621   PERFETTO_DCHECK_THREAD(thread_checker_);
3622 
3623   if (metatrace::IsEnabled(metatrace::TAG_TRACE_SERVICE)) {
3624     PERFETTO_METATRACE_COUNTER(TAG_TRACE_SERVICE, TRACE_SERVICE_COMMIT_DATA,
3625                                EncodeCommitDataRequest(id_, req_untrusted));
3626   }
3627 
3628   if (!shared_memory_) {
3629     PERFETTO_DLOG(
3630         "Attempted to commit data before the shared memory was allocated.");
3631     return;
3632   }
3633   PERFETTO_DCHECK(shmem_abi_.is_valid());
3634   for (const auto& entry : req_untrusted.chunks_to_move()) {
3635     const uint32_t page_idx = entry.page();
3636     if (page_idx >= shmem_abi_.num_pages())
3637       continue;  // A buggy or malicious producer.
3638 
3639     SharedMemoryABI::Chunk chunk =
3640         shmem_abi_.TryAcquireChunkForReading(page_idx, entry.chunk());
3641     if (!chunk.is_valid()) {
3642       PERFETTO_DLOG("Asked to move chunk %d:%d, but it's not complete",
3643                     entry.page(), entry.chunk());
3644       continue;
3645     }
3646 
3647     // TryAcquireChunkForReading() has load-acquire semantics. Once acquired,
3648     // the ABI contract expects the producer to not touch the chunk anymore
3649     // (until the service marks that as free). This is why all the reads below
3650     // are just memory_order_relaxed. Also, the code here assumes that all this
3651     // data can be malicious and just gives up if anything is malformed.
3652     BufferID buffer_id = static_cast<BufferID>(entry.target_buffer());
3653     const SharedMemoryABI::ChunkHeader& chunk_header = *chunk.header();
3654     WriterID writer_id = chunk_header.writer_id.load(std::memory_order_relaxed);
3655     ChunkID chunk_id = chunk_header.chunk_id.load(std::memory_order_relaxed);
3656     auto packets = chunk_header.packets.load(std::memory_order_relaxed);
3657     uint16_t num_fragments = packets.count;
3658     uint8_t chunk_flags = packets.flags;
3659 
3660     service_->CopyProducerPageIntoLogBuffer(
3661         id_, uid_, writer_id, chunk_id, buffer_id, num_fragments, chunk_flags,
3662         /*chunk_complete=*/true, chunk.payload_begin(), chunk.payload_size());
3663 
3664     // This one has release-store semantics.
3665     shmem_abi_.ReleaseChunkAsFree(std::move(chunk));
3666   }  // for(chunks_to_move)
3667 
3668   service_->ApplyChunkPatches(id_, req_untrusted.chunks_to_patch());
3669 
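  // A non-zero flush_request_id means this commit was issued in response to a
  // Flush() request; notify the service so it can account for this producer's
  // reply.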
3670   if (req_untrusted.flush_request_id()) {
3671     service_->NotifyFlushDoneForProducer(id_, req_untrusted.flush_request_id());
3672   }
3673 
3674   // Keep this invocation last. ProducerIPCService::CommitData() relies on this
3675   // callback being invoked within the same callstack and not posted. If this
3676   // changes, the code there needs to be changed accordingly.
3677   if (callback)
3678     callback();
3679 }
3680 
3681 void TracingServiceImpl::ProducerEndpointImpl::SetupSharedMemory(
3682     std::unique_ptr<SharedMemory> shared_memory,
3683     size_t page_size_bytes,
3684     bool provided_by_producer) {
3685   PERFETTO_DCHECK(!shared_memory_ && !shmem_abi_.is_valid());
3686   PERFETTO_DCHECK(page_size_bytes % 1024 == 0);
3687 
3688   shared_memory_ = std::move(shared_memory);
3689   shared_buffer_page_size_kb_ = page_size_bytes / 1024;
3690   is_shmem_provided_by_producer_ = provided_by_producer;
3691 
3692   shmem_abi_.Initialize(reinterpret_cast<uint8_t*>(shared_memory_->start()),
3693                         shared_memory_->size(),
3694                         shared_buffer_page_size_kb() * 1024);
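  // In-process producers bypass the IPC layer, so the service instantiates a
  // SharedMemoryArbiter directly on top of the same shared memory buffer.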
3695   if (in_process_) {
3696     inproc_shmem_arbiter_.reset(new SharedMemoryArbiterImpl(
3697         shared_memory_->start(), shared_memory_->size(),
3698         shared_buffer_page_size_kb_ * 1024, this, task_runner_));
3699     inproc_shmem_arbiter_->SetDirectSMBPatchingSupportedByService();
3700   }
3701 
3702   OnTracingSetup();
3703   service_->UpdateMemoryGuardrail();
3704 }
3705 
3706 SharedMemory* TracingServiceImpl::ProducerEndpointImpl::shared_memory() const {
3707   PERFETTO_DCHECK_THREAD(thread_checker_);
3708   return shared_memory_.get();
3709 }
3710 
3711 size_t TracingServiceImpl::ProducerEndpointImpl::shared_buffer_page_size_kb()
3712     const {
3713   return shared_buffer_page_size_kb_;
3714 }
3715 
3716 void TracingServiceImpl::ProducerEndpointImpl::ActivateTriggers(
3717     const std::vector<std::string>& triggers) {
3718   service_->ActivateTriggers(id_, triggers);
3719 }
3720 
3721 void TracingServiceImpl::ProducerEndpointImpl::StopDataSource(
3722     DataSourceInstanceID ds_inst_id) {
3723   // TODO(primiano): When we support tearing down the SMB, at this point we
3724   // should send the Producer a TearDownTracing if all its data sources have
3725   // been disabled (see b/77532839 and aosp/655179 PS1).
3726   PERFETTO_DCHECK_THREAD(thread_checker_);
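  // Deliver the stop request asynchronously via the task runner; the weak
  // pointer guards against this endpoint being destroyed before the posted
  // task runs.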
3727   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3728   task_runner_->PostTask([weak_this, ds_inst_id] {
3729     if (weak_this)
3730       weak_this->producer_->StopDataSource(ds_inst_id);
3731   });
3732 }
3733 
3734 SharedMemoryArbiter*
3735 TracingServiceImpl::ProducerEndpointImpl::MaybeSharedMemoryArbiter() {
3736   if (!inproc_shmem_arbiter_) {
3737     PERFETTO_FATAL(
3738         "The in-process SharedMemoryArbiter can only be used when "
3739         "CreateProducer has been called with in_process=true and after tracing "
3740         "has started.");
3741   }
3742 
3743   PERFETTO_DCHECK(in_process_);
3744   return inproc_shmem_arbiter_.get();
3745 }
3746 
3747 bool TracingServiceImpl::ProducerEndpointImpl::IsShmemProvidedByProducer()
3748     const {
3749   return is_shmem_provided_by_producer_;
3750 }
3751 
3752 // Can be called on any thread.
3753 std::unique_ptr<TraceWriter>
3754 TracingServiceImpl::ProducerEndpointImpl::CreateTraceWriter(
3755     BufferID buf_id,
3756     BufferExhaustedPolicy buffer_exhausted_policy) {
3757   PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
3758   return MaybeSharedMemoryArbiter()->CreateTraceWriter(buf_id,
3759                                                        buffer_exhausted_policy);
3760 }
3761 
3762 void TracingServiceImpl::ProducerEndpointImpl::NotifyFlushComplete(
3763     FlushRequestID id) {
3764   PERFETTO_DCHECK_THREAD(thread_checker_);
3765   PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
3766   return MaybeSharedMemoryArbiter()->NotifyFlushComplete(id);
3767 }
3768 
3769 void TracingServiceImpl::ProducerEndpointImpl::OnTracingSetup() {
3770   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3771   task_runner_->PostTask([weak_this] {
3772     if (weak_this)
3773       weak_this->producer_->OnTracingSetup();
3774   });
3775 }
3776 
3777 void TracingServiceImpl::ProducerEndpointImpl::Flush(
3778     FlushRequestID flush_request_id,
3779     const std::vector<DataSourceInstanceID>& data_sources) {
3780   PERFETTO_DCHECK_THREAD(thread_checker_);
3781   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3782   task_runner_->PostTask([weak_this, flush_request_id, data_sources] {
3783     if (weak_this) {
3784       weak_this->producer_->Flush(flush_request_id, data_sources.data(),
3785                                   data_sources.size());
3786     }
3787   });
3788 }
3789 
3790 void TracingServiceImpl::ProducerEndpointImpl::SetupDataSource(
3791     DataSourceInstanceID ds_id,
3792     const DataSourceConfig& config) {
3793   PERFETTO_DCHECK_THREAD(thread_checker_);
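  // Note: remember that the config's target buffer is a legitimate
  // destination for this producer's commits; entries are revoked again in
  // OnFreeBuffers() below.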
3794   allowed_target_buffers_.insert(static_cast<BufferID>(config.target_buffer()));
3795   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3796   task_runner_->PostTask([weak_this, ds_id, config] {
3797     if (weak_this)
3798       weak_this->producer_->SetupDataSource(ds_id, std::move(config));
3799   });
3800 }
3801 
3802 void TracingServiceImpl::ProducerEndpointImpl::StartDataSource(
3803     DataSourceInstanceID ds_id,
3804     const DataSourceConfig& config) {
3805   PERFETTO_DCHECK_THREAD(thread_checker_);
3806   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3807   task_runner_->PostTask([weak_this, ds_id, config] {
3808     if (weak_this)
3809       weak_this->producer_->StartDataSource(ds_id, std::move(config));
3810   });
3811 }
3812 
3813 void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStarted(
3814     DataSourceInstanceID data_source_id) {
3815   PERFETTO_DCHECK_THREAD(thread_checker_);
3816   service_->NotifyDataSourceStarted(id_, data_source_id);
3817 }
3818 
3819 void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStopped(
3820     DataSourceInstanceID data_source_id) {
3821   PERFETTO_DCHECK_THREAD(thread_checker_);
3822   service_->NotifyDataSourceStopped(id_, data_source_id);
3823 }
3824 
3825 void TracingServiceImpl::ProducerEndpointImpl::OnFreeBuffers(
3826     const std::vector<BufferID>& target_buffers) {
3827   if (allowed_target_buffers_.empty())
3828     return;
3829   for (BufferID buffer : target_buffers)
3830     allowed_target_buffers_.erase(buffer);
3831 }
3832 
3833 void TracingServiceImpl::ProducerEndpointImpl::ClearIncrementalState(
3834     const std::vector<DataSourceInstanceID>& data_sources) {
3835   PERFETTO_DCHECK_THREAD(thread_checker_);
3836   auto weak_this = weak_ptr_factory_.GetWeakPtr();
3837   task_runner_->PostTask([weak_this, data_sources] {
3838     if (weak_this) {
3839       weak_this->producer_->ClearIncrementalState(data_sources.data(),
3840                                                   data_sources.size());
3841     }
3842   });
3843 }
3844 
3845 void TracingServiceImpl::ProducerEndpointImpl::Sync(
3846     std::function<void()> callback) {
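  // Posting the callback guarantees it runs only after any tasks already
  // queued on this task runner, giving Sync() its barrier-like semantics.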
3847   task_runner_->PostTask(callback);
3848 }
3849 
3850 ////////////////////////////////////////////////////////////////////////////////
3851 // TracingServiceImpl::TracingSession implementation
3852 ////////////////////////////////////////////////////////////////////////////////
3853 
3854 TracingServiceImpl::TracingSession::TracingSession(
3855     TracingSessionID session_id,
3856     ConsumerEndpointImpl* consumer,
3857     const TraceConfig& new_config,
3858     base::TaskRunner* task_runner)
3859     : id(session_id),
3860       consumer_maybe_null(consumer),
3861       consumer_uid(consumer->uid_),
3862       config(new_config),
3863       snapshot_periodic_task(task_runner) {
3864   // all_data_sources_flushed is special because we store up to 64 events of
3865   // this type. Other events will go through the default case in
3866   // SnapshotLifecycleEvent() where they will be given a max history of 1.
3867   lifecycle_events.emplace_back(
3868       protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
3869       64 /* max_size */);
3870 }
3871 
3872 }  // namespace perfetto
3873