1 // Copyright 2020 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include "perfetto-tracing-only.h"
15
16 #include "trace_packet.pbzero.h"
17 #include "counter_descriptor.pbzero.h"
18 #include "track_descriptor.pbzero.h"
19 #include "track_event.pbzero.h"
20 #include "interned_data.pbzero.h"
21
22 #include "perfetto/base/time.h"
23
24 #include "perfetto/protozero/message_handle.h"
25 #include "perfetto/protozero/proto_utils.h"
26 #include "perfetto/protozero/root_message.h"
27 #include "perfetto/protozero/scattered_stream_writer.h"
28
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <mutex>
#include <sstream>
#include <string>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>
38
39 namespace virtualdeviceperfetto {
40
// Default output handle; currently neither written nor read in this file.
static FILE* sDefaultFileHandle = nullptr;

// Process-wide tracing state shared by every thread's TraceContext.
// Mutated by enableTracing()/disableTracing()/setGuestTime(); the counter
// fields (packetsWritten, currentInterningId, currentThreadId) are bumped
// atomically by the per-thread writers via __atomic_add_fetch.
static VirtualDeviceTraceConfig sTraceConfig = {
    .initialized = false,
    .tracingDisabled = true,   // flipped to 0 last in enableTracing()
    .packetsWritten = 0,
    .sequenceIdWritten = 0,
    .currentInterningId = 1,
    .currentThreadId = 1,
    .hostFilename = nullptr,
    .guestFilename = nullptr,
    .combinedFilename = nullptr,
    .hostStartTime = 0,
    .guestStartTime = 0,
    .guestTimeDiff = 0,        // added to every packet timestamp (see getTimestamp)
    .perThreadStorageMb = 1,   // size of each per-thread trace buffer
};
58
// Maximum nesting depth of beginTrace()/endTrace() slices per thread;
// deeper begins are dropped.
#define TRACE_STACK_DEPTH_MAX 16

class TraceContext;

// A trace buffer detached from its TraceContext. Ownership of |data| (a
// malloc'd block, see TraceContext::allocTraceBuffer) travels with this
// struct; it is freed in TraceStorage::saveTracesToDisk().
struct SavedTraceInfo {
    uint8_t* data;    // serialized TracePackets; owned by the holder
    size_t allocSize; // total allocation size of |data|
    size_t written;   // bytes of |data| actually filled
    bool first;       // buffer holding the session's first packet;
                      // written to disk ahead of all others
};
69
// Process-wide registry of live per-thread TraceContexts plus the buffers
// they have handed off. All mutable state is guarded by |mContextsLock|.
class TraceStorage {
public:
    // Registers a newly constructed per-thread context.
    void add(TraceContext* context) {
        std::lock_guard<std::mutex> lock(mContextsLock);
        mContexts.insert(context);
    }

    // Unregisters a context (called from ~TraceContext).
    void remove(TraceContext* context) {
        std::lock_guard<std::mutex> lock(mContextsLock);
        mContexts.erase(context);
    }

    void onTracingEnabled() {
        // do stuff
    }

    void onTracingDisabled() {
        saveTracesToDisk();
    }

    // When a thread exited early before tracing was disabled
    void saveTrace(const SavedTraceInfo& trace) {
        std::lock_guard<std::mutex> lock(mContextsLock);
        saveTraceLocked(trace);
    }

private:
    // Caller must hold |mContextsLock|. Buffers with null |data| (contexts
    // whose buffer was already detached) are silently dropped.
    void saveTraceLocked(const SavedTraceInfo& trace) {
        if (trace.data)
            mSavedTraces.push_back(trace);
    }

    // Defined below: drains all live contexts and writes the host trace file.
    void saveTracesToDisk();

    std::mutex mContextsLock; // protects |mContexts| and |mSavedTraces|
    std::unordered_set<TraceContext*> mContexts;
    std::vector<SavedTraceInfo> mSavedTraces; // owned; freed in saveTracesToDisk()
};
108
// Singleton registry shared by every TraceContext in the process.
static TraceStorage sTraceStorage;
110
111 class TraceContext : public protozero::ScatteredStreamWriter::Delegate {
112 public:
TraceContext()113 TraceContext() :
114 mWriter(this) {
115 sTraceStorage.add(this);
116 }
117
save(bool partialReset=false)118 SavedTraceInfo save(bool partialReset = false) {
119 ScopedTracingLock lock(&mTracingLock);
120 return saveLocked(partialReset);
121 }
122
~TraceContext()123 virtual ~TraceContext() {
124 sTraceStorage.remove(this);
125 if (mTraceBuffer) {
126 sTraceStorage.saveTrace(save());
127 }
128 }
129
GetNewBuffer()130 virtual protozero::ContiguousMemoryRange GetNewBuffer() {
131 if (mWritingPacket) {
132 mPacket.Finalize();
133 }
134
135 finishAndRefresh();
136
137 if (mWritingPacket) {
138 beginPacket();
139 size_t writtenThisTime = size_t(((uintptr_t)mWriter.write_ptr()) - (uintptr_t)mTraceBuffer);
140 return protozero::ContiguousMemoryRange{mWriter.write_ptr(), mWriter.write_ptr() + (mTraceBufferSize - writtenThisTime) };
141 } else {
142 return protozero::ContiguousMemoryRange{mTraceBuffer, mTraceBuffer + mTraceBufferSize};
143 }
144
145 }
146
147 #ifdef __cplusplus
148 # define CC_LIKELY( exp ) (__builtin_expect( !!(exp), true ))
149 # define CC_UNLIKELY( exp ) (__builtin_expect( !!(exp), false ))
150 #else
151 # define CC_LIKELY( exp ) (__builtin_expect( !!(exp), 1 ))
152 # define CC_UNLIKELY( exp ) (__builtin_expect( !!(exp), 0 ))
153 #endif
154 static const uint32_t kSequenceId = 1;
155 static constexpr char kTrackNamePrefix[] = "emu-";
156 static constexpr char kCounterNamePrefix[] = "-count-";
beginTrace(const char * name)157 void beginTrace(const char* name) {
158 if (CC_LIKELY(sTraceConfig.tracingDisabled)) return;
159 if (CC_UNLIKELY(mStackDepth == TRACE_STACK_DEPTH_MAX)) return;
160
161 ScopedTracingLock lock(&mTracingLock);
162
163 ensureThreadInfo();
164 bool needEmitEventIntern = false;
165 mCurrentEventNameIid[mStackDepth] = internEvent(name, &needEmitEventIntern);
166 if (CC_UNLIKELY(needEmitEventIntern)) {
167 beginPacket();
168 mPacket.set_trusted_packet_sequence_id(kSequenceId);
169 mPacket.set_sequence_flags(2 /* incremental */);
170 auto interned_data = mPacket.set_interned_data();
171 auto eventname = interned_data->add_event_names();
172 eventname->set_iid(mCurrentEventNameIid[mStackDepth]);
173 eventname->set_name(name);
174 endPacket();
175 }
176 // Finally do the actual thing
177 beginPacket();
178 mPacket.set_trusted_packet_sequence_id(kSequenceId);
179 mPacket.set_sequence_flags(2 /* incremental */);
180 mPacket.set_timestamp(getTimestamp());
181 auto trackevent = mPacket.set_track_event();
182 trackevent->set_track_uuid(mThreadId); // thread id
183 trackevent->add_category_iids(mCurrentCategoryIid[mStackDepth]);
184 trackevent->set_name_iid(mCurrentEventNameIid[mStackDepth]);
185 trackevent->set_type(::perfetto::protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN);
186 endPacket();
187 ++mStackDepth;
188 }
189
ensureThreadInfo()190 inline void ensureThreadInfo() __attribute__((always_inline)) {
191 // Write trusted sequence id if this is the first packet.
192 if (CC_UNLIKELY(1 == __atomic_add_fetch(&sTraceConfig.packetsWritten, 1, __ATOMIC_SEQ_CST))) {
193 mFirst = true;
194 sTraceConfig.sequenceIdWritten = true;
195 beginPacket();
196 mPacket.set_trusted_packet_sequence_id(1);
197 mPacket.set_sequence_flags(1);
198 endPacket();
199 } else if (!CC_LIKELY(sTraceConfig.sequenceIdWritten)) { // Not the first packet, but some other thread is writing the sequence id at the moment, wait for it.
200 while (!sTraceConfig.sequenceIdWritten);
201 }
202 // TODO: Allow changing category
203 static const char kCategory[] = "gfxstream";
204 bool needEmitCategoryIntern = false;
205 mCurrentCategoryIid[mStackDepth] = internCategory(kCategory, &needEmitCategoryIntern);
206 if (CC_UNLIKELY(needEmitCategoryIntern)) {
207 beginPacket();
208 mPacket.set_trusted_packet_sequence_id(kSequenceId);
209 mPacket.set_sequence_flags(2 /* incremental */);
210 auto interned_data = mPacket.set_interned_data();
211 auto category = interned_data->add_event_categories();
212 category->set_iid(mCurrentCategoryIid[mStackDepth]);
213 category->set_name(kCategory);
214 endPacket();
215 }
216 if (CC_UNLIKELY(mNeedToSetThreadId)) {
217 mThreadId = __atomic_add_fetch(&sTraceConfig.currentThreadId, 1, __ATOMIC_RELAXED);
218 mNeedToSetThreadId = false;
219 fprintf(stderr, "%s: found thread id: %u\n", __func__, mThreadId);
220 beginPacket();
221 mPacket.set_trusted_packet_sequence_id(kSequenceId);
222 mPacket.set_sequence_flags(2 /* incremental */);
223 auto desc = mPacket.set_track_descriptor();
224 desc->set_uuid(mThreadId);
225 desc->set_name(getTrackNameFromThreadId(mThreadId));
226 endPacket();
227 }
228 }
endTrace()229 void endTrace() {
230 if (CC_LIKELY(sTraceConfig.tracingDisabled)) return;
231 if (CC_UNLIKELY(mStackDepth == TRACE_STACK_DEPTH_MAX)) return;
232 if (CC_UNLIKELY(mStackDepth == 0)) return;
233 --mStackDepth;
234
235 ScopedTracingLock lock(&mTracingLock);
236
237 // Finally do the actual thing
238 beginPacket();
239 mPacket.set_trusted_packet_sequence_id(kSequenceId);
240 mPacket.set_sequence_flags(2 /* incremental */);
241 mPacket.set_timestamp(getTimestamp());
242 auto trackevent = mPacket.set_track_event();
243 trackevent->add_category_iids(mCurrentCategoryIid[mStackDepth]);
244 trackevent->set_track_uuid(mThreadId); // thread id
245 trackevent->set_name_iid(mCurrentEventNameIid[mStackDepth]);
246 trackevent->set_type(::perfetto::protos::pbzero::TrackEvent::TYPE_SLICE_END);
247 endPacket();
248 }
249
traceCounter(const char * name,int64_t val)250 void traceCounter(const char* name, int64_t val) {
251 if (CC_LIKELY(sTraceConfig.tracingDisabled)) return;
252
253 ScopedTracingLock lock(&mTracingLock);
254
255 ensureThreadInfo();
256 bool first;
257 uint32_t counterId;
258 uint64_t counterTrackUuid = getOrCreateCounterTrackUuid(name, &counterId, &first);
259 if (CC_UNLIKELY(first)) {
260 fprintf(stderr, "%s: thread id: %u has a counter: %u. uuid: 0x%llx\n", __func__, mThreadId, counterId, (unsigned long long)(counterTrackUuid));
261 beginPacket();
262 auto desc = mPacket.set_track_descriptor();
263 desc->set_uuid(counterTrackUuid);
264 desc->set_name(getTrackNameFromThreadIdAndCounterName(mThreadId, name));
265 desc->set_counter();
266 endPacket();
267 }
268 // Do the actual counter track event
269 beginPacket();
270 mPacket.set_trusted_packet_sequence_id(kSequenceId);
271 mPacket.set_sequence_flags(2 /* incremental */);
272 mPacket.set_timestamp(getTimestamp());
273 auto trackevent = mPacket.set_track_event();
274 trackevent->set_track_uuid(counterTrackUuid);
275 trackevent->set_type(::perfetto::protos::pbzero::TrackEvent::TYPE_COUNTER);
276 trackevent->set_counter_value(val);
277 endPacket();
278 }
279
waitFinish()280 void waitFinish() {
281 ScopedTracingLock lock(&mTracingLock);
282 }
283
284 private:
285 class ScopedTracingLock {
286 public:
ScopedTracingLock(std::atomic_flag * flag)287 ScopedTracingLock(std::atomic_flag* flag) : mFlag(flag) {
288 while (mFlag->test_and_set(std::memory_order_acquire)) {
289 // spin
290 }
291 }
292
~ScopedTracingLock()293 ~ScopedTracingLock() {
294 mFlag->clear(std::memory_order_release);
295 }
296 private:
297 std::atomic_flag* mFlag;
298 };
299
saveLocked(bool partialReset)300 SavedTraceInfo saveLocked(bool partialReset) {
301 // Invalidates mTraceBuffer, transfers ownership of it.
302 SavedTraceInfo info = {
303 mTraceBuffer,
304 mTraceBufferSize,
305 size_t(((uintptr_t)mWriter.write_ptr()) - (uintptr_t)mTraceBuffer),
306 mFirst,
307 };
308
309 resetLocked(partialReset);
310
311 return info;
312 }
313
resetLocked(bool partialReset)314 void resetLocked(bool partialReset) {
315 mTraceBuffer = nullptr;
316 mTraceBufferSize = 0;
317 mFirst = false;
318
319 if (partialReset) return;
320
321 mNeedToSetThreadId = true;
322 mThreadId = 0;
323 mNeedToConfigureGuestTime = true;
324 mCurrentCounterId = 1;
325 mTimeDiff = 0;
326 mStackDepth = 0;
327 mEventCategoryInterningIds.clear();
328 mEventNameInterningIds.clear();
329 mCounterNameToTrackUuids.clear();
330 mPacket.Reset(&mWriter);
331 }
332
finishAndRefresh()333 void finishAndRefresh() {
334 if (mTraceBuffer) {
335 sTraceStorage.saveTrace(saveLocked(true /* partial reset */));
336 }
337 allocTraceBuffer();
338 };
339
allocTraceBuffer()340 void allocTraceBuffer() {
341 // Freed after ownership is transferred to Trace Storage
342 mTraceBufferSize = sTraceConfig.perThreadStorageMb * 1048576;
343 mTraceBuffer = (uint8_t*)malloc(mTraceBufferSize);
344 mWriter.Reset(protozero::ContiguousMemoryRange{mTraceBuffer, mTraceBuffer + mTraceBufferSize});
345 }
346
getTimestamp()347 inline uint64_t getTimestamp() {
348 uint64_t t = (uint64_t)(::perfetto::base::GetWallTimeNs().count());
349 t += sTraceConfig.guestTimeDiff;
350 return t;
351 }
352
353 template <typename T>
writeVarInt(T value,uint8_t * target)354 static inline uint8_t* writeVarInt(T value, uint8_t* target) {
355 // If value is <= 0 we must first sign extend to int64_t (see [1]).
356 // Finally we always cast to an unsigned value to to avoid arithmetic
357 // (sign expanding) shifts in the while loop.
358 // [1]: "If you use int32 or int64 as the type for a negative number, the
359 // resulting varint is always ten bytes long".
360 // - developers.google.com/protocol-buffers/docs/encoding
361 // So for each input type we do the following casts:
362 // uintX_t -> uintX_t -> uintX_t
363 // int8_t -> int64_t -> uint64_t
364 // int16_t -> int64_t -> uint64_t
365 // int32_t -> int64_t -> uint64_t
366 // int64_t -> int64_t -> uint64_t
367 using MaybeExtendedType =
368 typename std::conditional<std::is_unsigned<T>::value, T, int64_t>::type;
369 using UnsignedType = typename std::make_unsigned<MaybeExtendedType>::type;
370
371 MaybeExtendedType extended_value = static_cast<MaybeExtendedType>(value);
372 UnsignedType unsigned_value = static_cast<UnsignedType>(extended_value);
373
374 while (unsigned_value >= 0x80) {
375 *target++ = static_cast<uint8_t>(unsigned_value) | 0x80;
376 unsigned_value >>= 7;
377 }
378 *target = static_cast<uint8_t>(unsigned_value);
379 return target + 1;
380 }
381
beginPacket()382 void beginPacket() {
383 if (CC_UNLIKELY(mTraceBuffer == nullptr)) {
384 allocTraceBuffer();
385 }
386
387 // Make sure there's enough space for the preamble and size field, and to hold a track event.
388 static const size_t kTrackEventPadding = 200; // conservatively 200 bytes
389 static const size_t kPacketHeaderSize = 4;
390 size_t neededSpace = kPacketHeaderSize + 4 + kTrackEventPadding;
391 if (mWriter.bytes_available() < neededSpace) {
392 finishAndRefresh();
393 }
394
395 mPacket.Reset(&mWriter);
396 // Write the preamble
397 constexpr uint32_t tag = protozero::proto_utils::MakeTagLengthDelimited(1 /* trace packet id */);
398 uint8_t tagScratch[10];
399 auto scratchNext = writeVarInt(tag, tagScratch);
400 mWriter.WriteBytes(tagScratch, scratchNext - tagScratch);
401 // Reserve the size field
402 uint8_t* header = mWriter.ReserveBytes(kPacketHeaderSize);
403 memset(header, 0, kPacketHeaderSize);
404 mPacket.set_size_field(header);
405 mWritingPacket = true;
406 }
407
endPacket()408 void endPacket() {
409 mWritingPacket = false;
410 mPacket.Finalize();
411 }
412
internCategory(const char * str,bool * firstTime)413 uint32_t internCategory(const char* str, bool* firstTime) {
414 auto it = mEventCategoryInterningIds.find(str);
415 if (it != mEventCategoryInterningIds.end()) {
416 *firstTime = false;
417 return it->second;
418 }
419 uint32_t res = sTraceConfig.currentInterningId;
420 mEventCategoryInterningIds[str] = res;
421 __atomic_add_fetch(&sTraceConfig.currentInterningId, 1, __ATOMIC_RELAXED);
422 *firstTime = true;
423 return res;
424 }
425
internEvent(const char * str,bool * firstTime)426 uint32_t internEvent(const char* str, bool* firstTime) {
427 auto it = mEventNameInterningIds.find(str);
428 if (it != mEventNameInterningIds.end()) {
429 *firstTime = false;
430 return it->second;
431 }
432 uint32_t res = sTraceConfig.currentInterningId;
433 mEventNameInterningIds[str] = res;
434 __atomic_add_fetch(&sTraceConfig.currentInterningId, 1, __ATOMIC_RELAXED);
435 *firstTime = true;
436 return res;
437 }
438
getTrackNameFromThreadId(uint32_t threadId)439 static std::string getTrackNameFromThreadId(uint32_t threadId) {
440 std::stringstream ss;
441 ss << kTrackNamePrefix << threadId;
442 return ss.str();
443 }
444
getTrackNameFromThreadIdAndCounterName(uint32_t threadId,const char * counterName)445 static std::string getTrackNameFromThreadIdAndCounterName(uint32_t threadId, const char* counterName) {
446 std::stringstream ss;
447 ss << kTrackNamePrefix << threadId << kCounterNamePrefix << counterName;
448 return ss.str();
449 }
450
getOrCreateCounterTrackUuid(const char * name,uint32_t * counterId,bool * firstTime)451 uint64_t getOrCreateCounterTrackUuid(const char* name, uint32_t* counterId, bool* firstTime) {
452 auto it = mCounterNameToTrackUuids.find(name);
453 uint64_t res;
454 if (CC_UNLIKELY(it == mCounterNameToTrackUuids.end())) {
455 // The counter track uuid is the thread id | shifted counter id.
456 res = (((uint64_t)mCurrentCounterId) << 32) | mThreadId;
457 mCounterNameToTrackUuids[name] = res;
458 *counterId = mCurrentCounterId;
459 *firstTime = true;
460 // Increment counter id for this thread.
461 ++mCurrentCounterId;
462 } else {
463 res = it->second;
464 *counterId = res >> 32;
465 *firstTime = false;
466 }
467 return res;
468 }
469
470 uint8_t* mTraceBuffer = nullptr;
471 size_t mTraceBufferSize = 0;
472 bool mFirst = false;
473 bool mWritingPacket = false;
474 bool mNeedToSetThreadId = true;
475 uint32_t mThreadId = 0;
476 bool mNeedToConfigureGuestTime = true;
477 uint32_t mCurrentCounterId = 1;
478 uint64_t mTimeDiff = 0;
479 std::atomic_flag mTracingLock;
480 uint32_t mStackDepth = 0;
481 uint32_t mCurrentCategoryIid[TRACE_STACK_DEPTH_MAX];
482 uint32_t mCurrentEventNameIid[TRACE_STACK_DEPTH_MAX];
483 protozero::RootMessage<::perfetto::protos::pbzero::TracePacket> mPacket;
484 protozero::ScatteredStreamWriter mWriter;
485 std::unordered_map<const char*, uint32_t> mEventCategoryInterningIds;
486 std::unordered_map<const char*, uint32_t> mEventNameInterningIds;
487 std::unordered_map<const char*, uint64_t> mCounterNameToTrackUuids;
488 };
489
asyncTraceSaveFunc()490 void asyncTraceSaveFunc() {
491 fprintf(stderr, "%s: Saving combined trace async...\n", __func__);
492
493 static const int kWaitSecondsPerIteration = 1;
494 static const int kMaxIters = 20;
495 static const int kMinItersForGuestFileSize = 2;
496
497 const char* hostFilename = sTraceConfig.hostFilename;
498 const char* guestFilename = sTraceConfig.guestFilename;
499 const char* combinedFilename = sTraceConfig.combinedFilename;
500
501 std::streampos currGuestSize = 0;
502 int numGoodGuestFileSizeIters = 0;
503 bool good = false;
504
505 for (int i = 0; i < kMaxIters; ++i) {
506 fprintf(stderr, "%s: Waiting for 1 second...\n", __func__);
507 std::this_thread::sleep_for(std::chrono::seconds(kWaitSecondsPerIteration));
508 fprintf(stderr, "%s: Querying file size of guest trace...\n", __func__);
509 std::ifstream guestFile(guestFilename, std::ios::in | std::ios::binary | std::ios::ate);
510 std::streampos size = guestFile.tellg();
511
512 if (!size) {
513 fprintf(stderr, "%s: No size, try again\n", __func__);
514 continue;
515 }
516
517 if (size != currGuestSize) {
518 fprintf(stderr, "%s: Sized changed (%llu to %llu), try again\n", __func__,
519 (unsigned long long)currGuestSize, (unsigned long long)size);
520 currGuestSize = size;
521 continue;
522 }
523
524 ++numGoodGuestFileSizeIters;
525
526 if (numGoodGuestFileSizeIters == kMinItersForGuestFileSize) {
527 fprintf(stderr, "%s: size is stable, continue saving\n", __func__);
528 good = true;
529 break;
530 }
531 }
532
533 if (!good) {
534 fprintf(stderr, "%s: Timed out when waiting for guest file to stabilize, skipping combined trace saving.\n", __func__);
535 return;
536 }
537
538 std::ifstream hostFile(hostFilename, std::ios_base::binary);
539 std::ifstream guestFile(guestFilename, std::ios_base::binary);
540 std::ofstream combinedFile(combinedFilename, std::ios::out | std::ios_base::binary);
541
542 combinedFile << guestFile.rdbuf() << hostFile.rdbuf();
543
544 fprintf(stderr, "%s: Wrote combined trace (%s)\n", __func__, combinedFilename);
545 }
546
// Drains every live TraceContext and writes all saved buffers to the host
// trace file; then, if guest + combined filenames are configured, kicks off a
// detached thread to merge in the guest-side trace.
void TraceStorage::saveTracesToDisk() {
    fprintf(stderr, "%s: Tracing ended================================================================================\n", __func__);
    fprintf(stderr, "%s: Saving trace to disk. Configuration:\n", __func__);
    fprintf(stderr, "%s: host filename: %s\n", __func__, sTraceConfig.hostFilename);
    fprintf(stderr, "%s: guest filename: %s\n", __func__, sTraceConfig.guestFilename);
    fprintf(stderr, "%s: combined filename: %s\n", __func__, sTraceConfig.combinedFilename);

    fprintf(stderr, "%s: Saving host trace first...\n", __func__);

    std::lock_guard<std::mutex> lock(mContextsLock);

    // Detach each live context's buffer (context->save() is a full reset).
    for (auto context: mContexts) {
        saveTraceLocked(context->save());
    };

    std::ofstream hostOut;
    hostOut.open(sTraceConfig.hostFilename, std::ios::out | std::ios::binary);

    // First pass: the buffer marked |first| holds the session's initial
    // sequence-id packet and must lead the file.
    for (const auto& info : mSavedTraces) {
        if (info.first) {
            hostOut.write((const char*)(info.data), info.written);
        }
    }

    // Second pass: write all remaining buffers. free() runs unconditionally,
    // so first-pass buffers are also released here.
    for (const auto& info : mSavedTraces) {
        if (!info.first) {
            hostOut.write((const char*)(info.data), info.written);
        }
        free(info.data);
    }

    hostOut.close();

    mSavedTraces.clear();

    fprintf(stderr, "%s: Saving host trace first...(done)\n", __func__);

    if (!sTraceConfig.guestFilename || !sTraceConfig.combinedFilename) {
        fprintf(stderr, "%s: skipping guest combined trace, "
                "either guest file name (%p) not specified or "
                "combined file name (%p) not specified\n", __func__,
                sTraceConfig.guestFilename,
                sTraceConfig.combinedFilename);
        return;
    }

    // Detached: asyncTraceSaveFunc snapshots the filenames it needs on entry.
    std::thread saveThread(asyncTraceSaveFunc);
    saveThread.detach();
}
596
// One TraceContext per thread; its destructor saves any pending buffer when
// the thread exits.
static thread_local TraceContext sThreadLocalTraceContext;
598
// Applies |f| to the global trace config in place. Not synchronized; callers
// are expected to configure before tracing is enabled.
PERFETTO_TRACING_ONLY_EXPORT void setTraceConfig(std::function<void(VirtualDeviceTraceConfig&)> f) {
    f(sTraceConfig);
}
602
// Returns a snapshot (by value) of the current global trace config.
PERFETTO_TRACING_ONLY_EXPORT VirtualDeviceTraceConfig queryTraceConfig() {
    return sTraceConfig;
}
606
// Hands the caller a stable pointer to the tracing-disabled flag so it can be
// polled cheaply without a function call per check.
PERFETTO_TRACING_ONLY_EXPORT void initialize(const bool** tracingDisabledPtr) {
    *tracingDisabledPtr = &sTraceConfig.tracingDisabled;
}
610
// Returns true when |s| holds a usable filename from the environment, i.e.
// the variable was present (non-null) and non-empty.
bool useFilenameByEnv(const char* s) {
    // Avoids constructing a temporary std::string just to compare with "".
    return s && s[0] != '\0';
}
614
// Starts a tracing session. Output destinations may be overridden by the
// VPERFETTO_HOST_FILE / VPERFETTO_GUEST_FILE / VPERFETTO_COMBINED_FILE
// environment variables. No-op when no host filename is configured or when
// tracing is already enabled.
PERFETTO_TRACING_ONLY_EXPORT void enableTracing() {
    const char* hostFilenameByEnv = std::getenv("VPERFETTO_HOST_FILE");
    const char* guestFilenameByEnv = std::getenv("VPERFETTO_GUEST_FILE");
    const char* combinedFilenameByEnv = std::getenv("VPERFETTO_COMBINED_FILE");

    // Non-empty environment variables override filenames installed earlier
    // (e.g. via setTraceConfig()).
    if (useFilenameByEnv(hostFilenameByEnv)) {
        sTraceConfig.hostFilename = hostFilenameByEnv;
    }

    if (useFilenameByEnv(guestFilenameByEnv)) {
        sTraceConfig.guestFilename = guestFilenameByEnv;
    }

    if (useFilenameByEnv(combinedFilenameByEnv)) {
        sTraceConfig.combinedFilename = combinedFilenameByEnv;
    }

    // Don't enable tracing if host filename is null
    if (!sTraceConfig.hostFilename) return;

    // Don't enable it twice
    if (!sTraceConfig.tracingDisabled) return;

    fprintf(stderr, "%s: Tracing begins================================================================================\n", __func__);
    fprintf(stderr, "%s: Configuration:\n", __func__);
    fprintf(stderr, "%s: host filename: %s (possibly set via $VPERFETTO_HOST_FILE)\n", __func__, sTraceConfig.hostFilename);
    fprintf(stderr, "%s: guest filename: %s (possibly set via $VPERFETTO_GUEST_FILE)\n", __func__, sTraceConfig.guestFilename);
    fprintf(stderr, "%s: combined filename: %s (possibly set via $VPERFETTO_COMBINED_FILE)\n", __func__, sTraceConfig.combinedFilename);
    fprintf(stderr, "%s: guest time diff to add to host time: %llu\n", __func__, (unsigned long long)sTraceConfig.guestTimeDiff);

    // Reset per-session counters before unblocking writers.
    sTraceConfig.packetsWritten = 0;
    sTraceConfig.sequenceIdWritten = 0;
    sTraceConfig.currentInterningId = 1;
    sTraceConfig.currentThreadId = 1;

    sTraceStorage.onTracingEnabled();
    // Flip the flag last: writers check tracingDisabled before touching any of
    // the state reset above.
    sTraceConfig.tracingDisabled = 0;
}
653
// Stops the current tracing session: flips the disabled flag first so writers
// stop producing packets, then flushes all buffers to disk (once per session)
// and resets the per-session counters.
PERFETTO_TRACING_ONLY_EXPORT void disableTracing() {
    // Don't enable or disable tracing if host filename is null
    if (!sTraceConfig.hostFilename) return;

    uint32_t tracingWasDisabled = sTraceConfig.tracingDisabled;
    sTraceConfig.tracingDisabled = 1;

    // Only flush when this call actually turned tracing off.
    if (!tracingWasDisabled) {
        sTraceStorage.onTracingDisabled();
    }

    sTraceConfig.packetsWritten = 0;
    sTraceConfig.sequenceIdWritten = 0;
    sTraceConfig.currentInterningId = 1;
    sTraceConfig.currentThreadId = 1;
    sTraceConfig.guestTimeDiff = 0;
}
671
beginTrace(const char * name)672 PERFETTO_TRACING_ONLY_EXPORT void beginTrace(const char* name) {
673 if (CC_LIKELY(sTraceConfig.tracingDisabled)) return;
674 sThreadLocalTraceContext.beginTrace(name);
675 }
676
endTrace()677 PERFETTO_TRACING_ONLY_EXPORT void endTrace() {
678 if (CC_LIKELY(sTraceConfig.tracingDisabled)) return;
679 sThreadLocalTraceContext.endTrace();
680 }
681
traceCounter(const char * name,int64_t val)682 PERFETTO_TRACING_ONLY_EXPORT void traceCounter(const char* name, int64_t val) {
683 if (CC_LIKELY(sTraceConfig.tracingDisabled)) return;
684 sThreadLocalTraceContext.traceCounter(name, val);
685 }
686
setGuestTime(uint64_t t)687 PERFETTO_TRACING_ONLY_EXPORT void setGuestTime(uint64_t t) {
688 virtualdeviceperfetto::setTraceConfig([t](virtualdeviceperfetto::VirtualDeviceTraceConfig& config) {
689 // can only be set before tracing
690 if (!config.tracingDisabled) {
691 return;
692 }
693 config.guestStartTime = t;
694 config.hostStartTime = (uint64_t)(::perfetto::base::GetWallTimeNs().count());
695 config.guestTimeDiff = config.guestStartTime - config.hostStartTime;
696 });
697 }
698
699 } // namespace virtualdeviceperfetto
700