/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Description: FlowController implementation
 */
#include "flow_controller.h"

#include <algorithm>
#include <cinttypes>
#include <map>
#include <set>
#include <unordered_set>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "bytrace_ops.h"
#include "file_utils.h"
#include "ftrace_field_parser.h"
#include "ftrace_fs_ops.h"
#include "logging.h"

namespace {
#ifndef PAGE_SIZE
constexpr uint32_t PAGE_SIZE = 4096;
#endif
constexpr int KB_PER_PAGE = PAGE_SIZE / 1024;
constexpr uint32_t BYTE_PER_KB = 1024;
constexpr uint32_t MAX_FLUSH_INTERVAL = 1800 * 1000;
constexpr uint32_t MAX_FLUSH_THRESHOLD = 128 * 1024 * 1024;
constexpr uint32_t MAX_TRACE_PERIOD_MS = 720 * 1000;
constexpr uint32_t MAX_BUFFER_SIZE_KB = 64 * 1024;    // 64 MB
constexpr uint32_t MIN_BUFFER_SIZE_KB = 1024;         // 1 MB
constexpr uint32_t DEFAULT_TRACE_PERIOD_MS = 250;     // 250 ms
constexpr uint32_t MAX_BLOCK_SIZE_PAGES = 4096;       // 16 MB
constexpr uint32_t MIN_BLOCK_SIZE_PAGES = 256;        // 1 MB
const std::set<std::string> g_availableClocks = {"boot", "global", "local", "mono"};
} // namespace

FTRACE_NS_BEGIN
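// Probe the registered userspace trace helpers and return the first one supported on this device;
// currently only bytrace is registered.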
std::unique_ptr<TraceOps> FlowController::GetTraceOps()
{
    std::vector<std::unique_ptr<TraceOps>> traceOps;
    traceOps.emplace_back(std::make_unique<BytraceOps>());

    for (size_t i = 0; i < traceOps.size(); i++) {
        auto& ops = traceOps[i];
        CHECK_TRUE(ops != nullptr, nullptr, "traceOps[%zu] is null!", i);
        if (ops->IsSupported()) {
            return std::move(ops);
        }
    }
    return nullptr;
}

FlowController::FlowController()
{
    ftraceParser_ = std::make_unique<FtraceParser>();
    ksymsParser_ = std::make_unique<KernelSymbolsParser>();
    ftraceSupported_ = FtraceFsOps::GetInstance().GetFtraceRoot().size() > 0;
}

FlowController::~FlowController(void)
{
    HILOG_INFO(LOG_CORE, "FlowController destroy!");
}

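// Bind the plugin result writer: create the result transporter, detect the CPU count,
// pick a userspace trace helper, and initialize the ftrace and kernel-symbol parsers.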
int FlowController::SetWriter(const WriterStructPtr& writer)
{
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_TRUE(resultWriter_ == nullptr, 0, "writer already set!");

    CHECK_NOTNULL(writer, -1, "writer null!");
    auto transporter = std::make_unique<ResultTransporter>("Transporter", writer);
    CHECK_NOTNULL(transporter, -1, "create ResultTransporter FAILED!");

    // get CPU core numbers
    int nprocs = static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
    CHECK_TRUE(nprocs > 0, -1, "get processor number failed!");
    platformCpuNum_ = nprocs;

    auto traceOps = GetTraceOps();
    CHECK_NOTNULL(traceOps, -1, "get trace ops failed!");

    // init FtraceParser
    CHECK_NOTNULL(ftraceParser_, -1, "FtraceParser create FAILED!");
    CHECK_TRUE(ftraceParser_->Init(), -1, "ftrace parser init failed!");

    // init KernelSymbolsParser
    CHECK_NOTNULL(ksymsParser_, -1, "KernelSymbolsParser create FAILED!");
    ksymsParser_->Parse(FtraceFsOps::GetInstance().GetKernelSymbols());

    CHECK_TRUE(AddPlatformEventsToParser(), -1, "add platform events to parser failed!");

    resultWriter_ = writer;
    tansporter_ = std::move(transporter);
    traceOps_ = std::move(traceOps);
    return 0;
}

bool FlowController::CreateRawDataReaders()
{
    for (int i = 0; i < platformCpuNum_; i++) {
        auto reader = std::make_unique<FtraceDataReader>(FtraceFsOps::GetInstance().GetRawTracePath(i));
        CHECK_NOTNULL(reader, false, "create reader %d FAILED!", i);
        ftraceReaders_.emplace_back(std::move(reader));
    }
    return true;
}

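// Size the shared memory pool: one block per CPU, each block holding the per-CPU share of
// buffer_size_kb clamped to [MIN_BLOCK_SIZE_PAGES, MAX_BLOCK_SIZE_PAGES] pages (1 MB to 16 MB).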
bool FlowController::CreatePagedMemoryPool()
{
    HILOG_INFO(LOG_CORE, "create memory pool, buffer_size_kb = %u", bufferSizeKb_);
    size_t bufferSizePages = bufferSizeKb_ / KB_PER_PAGE;
    size_t pagesPerBlock = bufferSizePages / platformCpuNum_;
    if (pagesPerBlock < MIN_BLOCK_SIZE_PAGES) {
        pagesPerBlock = MIN_BLOCK_SIZE_PAGES;
    }
    if (pagesPerBlock > MAX_BLOCK_SIZE_PAGES) {
        pagesPerBlock = MAX_BLOCK_SIZE_PAGES;
    }
    memPool_ = std::make_unique<PagedMemPool>(pagesPerBlock, platformCpuNum_);
    CHECK_NOTNULL(memPool_, false, "create PagedMemPool FAILED!");
    return true;
}

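// Allocate one read buffer per CPU from the pool; the shared_ptr deleter returns the block to the pool.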
bool FlowController::CreateRawDataBuffers()
{
    for (int i = 0; i < platformCpuNum_; i++) {
        using u8ptr = std::unique_ptr<uint8_t>::pointer;
        auto buffer = std::shared_ptr<uint8_t>(reinterpret_cast<u8ptr>(memPool_->Allocate()),
            [&](u8ptr block) { this->memPool_->Recycle(block); });
        CHECK_NOTNULL(buffer, false, "create buffer %d failed!", i);
        ftraceBuffers_.push_back(buffer);
    }
    return true;
}

bool FlowController::CreateRawDataCaches()
{
    for (size_t i = 0; i < rawDataDumpPath_.size(); i++) {
        auto& path = rawDataDumpPath_[i];
        HILOG_INFO(LOG_CORE, "create raw data cache[%zu]: %s", i, path.c_str());

        if (path.empty() || (path.length() >= PATH_MAX) || (path.find("..") != std::string::npos)) {
            HILOG_ERROR(LOG_CORE, "%s: path is invalid: %s, errno=%d", __func__, path.c_str(), errno);
            return false;
        }
        // guard the deleter: shared_ptr invokes it even when fopen failed and the pointer is null
        auto cache = std::shared_ptr<FILE>(fopen(path.c_str(), "wb+"), [](FILE* fp) {
            if (fp != nullptr) {
                fclose(fp);
            }
        });
        CHECK_NOTNULL(cache, false, "create cache[%zu]: %s failed!", i, path.c_str());
        rawDataDumpFile_.emplace_back(std::move(cache));
    }
    return true;
}

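// Start a capture session: report clocks and kernel symbols if requested, snapshot per-CPU stats,
// set up the memory pool, readers, buffers and caches, clear stale trace data, then spawn the
// polling thread and enable the configured events.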
int FlowController::StartCapture(void)
{
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_NOTNULL(ftraceParser_, -1, "create FtraceParser FAILED!");
    CHECK_NOTNULL(ksymsParser_, -1, "create KernelSymbolsParser FAILED!");
    CHECK_NOTNULL(tansporter_, -1, "create ResultTransporter FAILED!");
    CHECK_NOTNULL(traceOps_, -1, "create TraceOps FAILED!");

    // get clock times
    if (getClockTimes_) {
        CHECK_TRUE(ReportClockTimes(), -1, "report clock times FAILED!");
    }

    // parse kernel symbols
    if (parseKsyms_) {
        CHECK_TRUE(ParseKernelSymbols(), -1, "parse kernel symbols FAILED!");
    }

    // parse per cpu stats
    CHECK_TRUE(ParsePerCpuStatus(TRACE_START), -1, "parse TRACE_START stats failed!");

    // create memory pool, and raw data readers, buffers, caches.
    CHECK_TRUE(CreatePagedMemoryPool(), -1, "create paged memory pool failed!");
    CHECK_TRUE(CreateRawDataReaders(), -1, "create raw data readers failed!");
    CHECK_TRUE(CreateRawDataBuffers(), -1, "create raw data buffers failed!");
    CHECK_TRUE(CreateRawDataCaches(), -1, "create raw data caches failed!");

    // clear old trace
    FtraceFsOps::GetInstance().ClearTraceBuffer();

    // enable additional record options
    FtraceFsOps::GetInstance().SetRecordCmdOption(true);
    FtraceFsOps::GetInstance().SetRecordTgidOption(true);

    // start ftrace event data polling thread
    keepRunning_ = true;
    pollThread_ = std::thread(&FlowController::CaptureWork, this);

    // enable ftrace event switches
    if (traceCategories_.size() > 0) {
        traceOps_->EnableCategories(traceCategories_);
    }
    EnableTraceEvents();

    return 0;
}

static bool SetScheduler(int policy, int priority)
{
    HILOG_DEBUG(LOG_CORE, "before sched_setscheduler policy: %x", sched_getscheduler(0));
    struct sched_param param = {};
    param.sched_priority = priority;
    int retval = sched_setscheduler(0, policy, &param);
    CHECK_TRUE(retval != -1, false, "set schedule policy failed!");
    HILOG_DEBUG(LOG_CORE, "after sched_setscheduler policy: %x", sched_getscheduler(0));
    return true;
}

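// Polling loop run by pollThread_: every tracePeriodMs_ it drains each CPU's trace_pipe_raw into its
// buffer, appends the raw pages to the dump caches, refreshes tgid/cmdline tables, and parses the
// buffered pages into plugin results.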
void FlowController::CaptureWork()
{
    pthread_setname_np(pthread_self(), "TraceReader");
    HILOG_DEBUG(LOG_CORE, "FlowController::CaptureWork start!");

    UNUSED_PARAMETER(SetScheduler); // SetScheduler is currently unused; reference it to silence the warning
    auto tracePeriod = std::chrono::milliseconds(tracePeriodMs_);
    std::vector<long> rawDataBytes(platformCpuNum_, 0);

    while (keepRunning_) {
        std::this_thread::sleep_for(tracePeriod);

        // read data from per-CPU trace_pipe_raw, consume kernel ring buffers
        for (size_t i = 0; i < rawDataBytes.size(); i++) {
            long nbytes = ReadEventData(i);
            rawDataBytes[i] = nbytes;
        }

        // append buffer data to cache
        for (size_t i = 0; i < rawDataDumpFile_.size(); i++) {
            auto& file = rawDataDumpFile_[i];
            size_t written = fwrite(ftraceBuffers_[i].get(), sizeof(uint8_t), rawDataBytes[i], file.get());
            HILOG_INFO(LOG_CORE, "Append raw data to cache[%zu]: %zu/%ld bytes", i, written, rawDataBytes[i]);
        }

        // parse ftrace metadata
        ftraceParser_->ParseSavedTgid(FtraceFsOps::GetInstance().GetSavedTgids());
        ftraceParser_->ParseSavedCmdlines(FtraceFsOps::GetInstance().GetSavedCmdLines());

        // parse ftrace per-CPU event data
        for (size_t i = 0; i < rawDataBytes.size(); i++) {
            HILOG_INFO(LOG_CORE, "Parse raw data for CPU%zu: %ld bytes...", i, rawDataBytes[i]);
            ParseEventData(i, rawDataBytes[i]);
        }
    }
    HILOG_DEBUG(LOG_CORE, "FlowController::CaptureWork done!");
}

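// Read raw ring-buffer pages for one CPU into its pooled buffer. ftrace delivers whole pages, so the
// running offset is expected to stay page aligned; reading stops when the buffer is full or no data is left.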
long FlowController::ReadEventData(int cpuid)
{
    auto buffer = ftraceBuffers_[cpuid].get();
    auto reader = ftraceReaders_[cpuid].get();
    auto bufferSize = static_cast<long>(memPool_->GetBlockSize());
    (void)memset_s(buffer, bufferSize, 0, bufferSize);

    long nbytes = 0;
    long used = 0;
    long rest = bufferSize;
    while ((nbytes = reader->Read(&buffer[used], rest)) > 0 && used < bufferSize) {
        CHECK_TRUE(used % PAGE_SIZE == 0, used, "used invalid!");
        used += nbytes;
        rest -= nbytes;
    }
    return used;
}

bool FlowController::ParseEventData(int cpuid, long dataSize)
{
    auto buffer = ftraceBuffers_[cpuid].get();
    auto endPtr = buffer + dataSize;

    for (auto page = buffer; page < endPtr; page += PAGE_SIZE) {
        CHECK_TRUE(ParseFtraceEvent(cpuid, page), false, "parse raw event for cpu-%d failed!", cpuid);
    }
    return true;
}

int FlowController::StopCapture(void)
{
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_NOTNULL(tansporter_, -1, "create ResultTransporter FAILED!");
    CHECK_NOTNULL(traceOps_, -1, "create TraceOps FAILED!");

    // disable ftrace event switches
    DisableTraceEvents();

    // stop ftrace event data polling thread
    keepRunning_ = false;
    if (pollThread_.joinable()) {
        HILOG_INFO(LOG_CORE, "join thread start!\n");
        pollThread_.join();
        HILOG_INFO(LOG_CORE, "join thread done!\n");
    }

    // disable userspace trace triggers;
    // the trace command reads the trace buffer, so do this only after the polling thread has exited.
    if (traceCategories_.size() > 0) {
        traceOps_->DisableCategories();
    }

    // parse per cpu stats
    CHECK_TRUE(ParsePerCpuStatus(TRACE_END), -1, "parse TRACE_END stats failed!");

    // release resources
    rawDataDumpFile_.clear(); // close raw data dump files
    ftraceReaders_.clear();   // release ftrace data readers
    ftraceBuffers_.clear();   // release ftrace event read buffers
    memPool_.reset();         // release memory pool
    return 0;
}

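// Collect per-CPU ring-buffer statistics (entries, overruns, timestamps, ...) at the given stage
// (TRACE_START or TRACE_END) and submit them as an FtraceCpuStatsMsg.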
bool FlowController::ParsePerCpuStatus(int stage)
{
    auto tracePacket = std::make_unique<TracePluginResult>();
    CHECK_NOTNULL(tracePacket, false, "create TracePluginResult FAILED!");

    auto cpuStatsMsg = tracePacket->add_ftrace_cpu_stats();
    if (stage == TRACE_START) {
        cpuStatsMsg->set_status(FtraceCpuStatsMsg_Status_TRACE_START);
    } else {
        cpuStatsMsg->set_status(FtraceCpuStatsMsg_Status_TRACE_END);
    }

    std::string traceClock = FtraceFsOps::GetInstance().GetTraceClock();
    if (traceClock.size() > 0) {
        cpuStatsMsg->set_trace_clock(traceClock);
    }

    for (int i = 0; i < platformCpuNum_; i++) {
        HILOG_INFO(LOG_CORE, "[%d] ParsePerCpuStatus %d!", i, stage);
        PerCpuStats stats = {};
        stats.cpuIndex = i;
        ftraceParser_->ParsePerCpuStatus(stats, FtraceFsOps::GetInstance().GetPerCpuStats(i));
        auto perCpuMsg = cpuStatsMsg->add_per_cpu_stats();
        perCpuMsg->set_cpu(stats.cpuIndex);
        perCpuMsg->set_entries(stats.entries);
        perCpuMsg->set_overrun(stats.overrun);
        perCpuMsg->set_commit_overrun(stats.commitOverrun);
        perCpuMsg->set_bytes(stats.bytes);
        perCpuMsg->set_oldest_event_ts(stats.oldestEventTs);
        perCpuMsg->set_now_ts(stats.nowTs);
        perCpuMsg->set_dropped_events(stats.droppedEvents);
        perCpuMsg->set_read_events(stats.readEvents);
    }

    return tansporter_->Submit(std::move(tracePacket));
}

bool FlowController::ReportClockTimes()
{
    auto traceResult = std::make_unique<TracePluginResult>();
    CHECK_NOTNULL(traceResult, false, "create TracePluginResult FAILED!");

    std::map<clockid_t, ClockDetailMsg::ClockId> clocksMap = {
        {CLOCK_REALTIME, ClockDetailMsg::REALTIME},
        {CLOCK_REALTIME_COARSE, ClockDetailMsg::REALTIME_COARSE},
        {CLOCK_MONOTONIC, ClockDetailMsg::MONOTONIC},
        {CLOCK_MONOTONIC_COARSE, ClockDetailMsg::MONOTONIC_COARSE},
        {CLOCK_MONOTONIC_RAW, ClockDetailMsg::MONOTONIC_RAW},
        {CLOCK_BOOTTIME, ClockDetailMsg::BOOTTIME},
    };
    for (auto& entry : clocksMap) {
        struct timespec ts = {};
        clock_gettime(entry.first, &ts);
        auto clockMsg = traceResult->add_clocks_detail();
        CHECK_NOTNULL(clockMsg, false, "add clock_detail failed for %d!", entry.first);
        clockMsg->set_id(entry.second);
        auto timeMsg = clockMsg->mutable_time();
        timeMsg->set_tv_sec(ts.tv_sec);
        timeMsg->set_tv_nsec(ts.tv_nsec);

        struct timespec tsResolution = {};
        clock_getres(entry.first, &tsResolution);
        auto resolutionMsg = clockMsg->mutable_resolution();
        resolutionMsg->set_tv_sec(tsResolution.tv_sec);
        resolutionMsg->set_tv_nsec(tsResolution.tv_nsec);
    }
    return tansporter_->Submit(std::move(traceResult));
}

bool FlowController::ParseKernelSymbols()
{
    auto traceResult = std::make_unique<TracePluginResult>();
    CHECK_NOTNULL(traceResult, false, "create TracePluginResult FAILED!");

    ksymsParser_->Accept([&traceResult](const KernelSymbol& symbol) {
        auto symbolDetail = traceResult->add_symbols_detail();
        symbolDetail->set_symbol_addr(symbol.addr);
        symbolDetail->set_symbol_name(symbol.name);
    });
    HILOG_INFO(LOG_CORE, "parse kernel symbol message done!");
    return tansporter_->Submit(std::move(traceResult));
}

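// Parse one raw ring-buffer page for the given CPU into an ftrace_cpu_detail message and submit it.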
bool FlowController::ParseFtraceEvent(int cpuid, uint8_t page[])
{
    auto tracePacket = std::make_unique<TracePluginResult>();
    CHECK_NOTNULL(tracePacket, false, "create TracePluginResult FAILED!");

    auto cpudetail = tracePacket->add_ftrace_cpu_detail();
    cpudetail->set_cpu(static_cast<uint32_t>(cpuid));

    CHECK_TRUE(ftraceParser_->ParsePage(*cpudetail, page, PAGE_SIZE), false, "parse page failed!");

    return tansporter_->Submit(std::move(tracePacket));
}

bool FlowController::AddPlatformEventsToParser(void)
{
    CHECK_TRUE(ftraceSupported_, false, "current kernel does not support ftrace!");

    HILOG_INFO(LOG_CORE, "Add platform events to parser start!");
    for (auto& typeName : FtraceFsOps::GetInstance().GetPlatformEvents()) {
        std::string type = typeName.first;
        std::string name = typeName.second;
        if (ftraceParser_->SetupEvent(type, name)) {
            supportedEvents_.push_back(typeName);
        }
    }
    HILOG_INFO(LOG_CORE, "Add platform events to parser done, events: %zu!", supportedEvents_.size());
    return true;
}

static void PrintTraceConfig(const TracePluginConfig& config)
{
    HILOG_DEBUG(LOG_CORE, "ftrace_events: %d", config.ftrace_events_size());
    for (auto& event : config.ftrace_events()) {
        HILOG_DEBUG(LOG_CORE, " '%s'", event.c_str());
    }

    HILOG_DEBUG(LOG_CORE, "bytrace_apps: %d", config.bytrace_apps_size());
    for (auto& app : config.bytrace_apps()) {
        HILOG_DEBUG(LOG_CORE, " '%s'", app.c_str());
    }

    HILOG_DEBUG(LOG_CORE, "bytrace_categories: %d", config.bytrace_categories_size());
    for (auto& category : config.bytrace_categories()) {
        HILOG_DEBUG(LOG_CORE, " '%s'", category.c_str());
    }

    HILOG_DEBUG(LOG_CORE, "buffer_size_kb: %u", config.buffer_size_kb());
    HILOG_DEBUG(LOG_CORE, "flush_interval_ms: %u", config.flush_interval_ms());
    HILOG_DEBUG(LOG_CORE, "flush_threshold_kb: %u", config.flush_threshold_kb());

    HILOG_DEBUG(LOG_CORE, "parse_ksyms: %s", config.parse_ksyms() ? "true" : "false");
    HILOG_DEBUG(LOG_CORE, "clock: '%s'", config.clock().c_str());
    HILOG_DEBUG(LOG_CORE, "trace_period_ms: %d", config.trace_period_ms());
}

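// Apply a serialized TracePluginConfig: record the requested events, apps and categories, select the
// trace clock, and configure buffer size, flush parameters, raw-data dump paths and the read period.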
int FlowController::LoadConfig(const uint8_t configData[], uint32_t size)
{
    CHECK_TRUE(size > 0, -1, "config data size is zero!");
    CHECK_NOTNULL(configData, -1, "config data is null!");
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_NOTNULL(tansporter_, -1, "create ResultTransporter FAILED!");

    TracePluginConfig traceConfig;
    CHECK_TRUE(traceConfig.ParseFromArray(configData, size), -1, "parse %u bytes configData failed!", size);
    PrintTraceConfig(traceConfig);

    // sort and deduplicate user-requested trace events
    std::set<std::string> events(traceConfig.ftrace_events().begin(), traceConfig.ftrace_events().end());
    for (auto& ftraceEvent : events) {
        requestEvents_.push_back(ftraceEvent);
        HILOG_INFO(LOG_CORE, "ftraceEvent: %s", ftraceEvent.c_str());
    }

    traceApps_.assign(traceConfig.bytrace_apps().begin(), traceConfig.bytrace_apps().end());
    traceCategories_.assign(traceConfig.bytrace_categories().begin(), traceConfig.bytrace_categories().end());

    // setup trace clock
    if (g_availableClocks.count(traceConfig.clock()) > 0) {
        FtraceFsOps::GetInstance().SetTraceClock(traceConfig.clock());
    }

    // setup parse kernel symbol option
    parseKsyms_ = traceConfig.parse_ksyms();

    // setup trace buffer size
    SetupTraceBufferSize(traceConfig.buffer_size_kb());

    // setup transporter flush params
    SetupTransporterFlushParams(traceConfig.flush_interval_ms(), traceConfig.flush_threshold_kb());

    // generate raw data file names
    GenerateRawDataFileNames(traceConfig.raw_data_prefix());

    // setup trace period param
    SetupTraceReadPeriod(traceConfig.trace_period_ms());
    return 0;
}

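// Clamp the requested kernel buffer size to [MIN_BUFFER_SIZE_KB, MAX_BUFFER_SIZE_KB], round it down to a
// whole number of pages, and pass it to FtraceFsOps::SetBufferSizeKb.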
void FlowController::SetupTraceBufferSize(uint32_t sizeKb)
{
    if (sizeKb < MIN_BUFFER_SIZE_KB) {
        bufferSizeKb_ = MIN_BUFFER_SIZE_KB;
    } else if (sizeKb > MAX_BUFFER_SIZE_KB) {
        bufferSizeKb_ = MAX_BUFFER_SIZE_KB;
    } else {
        bufferSizeKb_ = sizeKb / KB_PER_PAGE * KB_PER_PAGE;
    }
    FtraceFsOps::GetInstance().SetBufferSizeKb(bufferSizeKb_);
}

void FlowController::SetupTransporterFlushParams(uint32_t flushInterval, uint32_t flushThresholdKb)
{
    if (flushInterval > 0 && flushInterval <= MAX_FLUSH_INTERVAL) {
        tansporter_->SetFlushInterval(flushInterval);
    }
    if (flushThresholdKb > 0 && flushThresholdKb <= MAX_FLUSH_THRESHOLD) {
        tansporter_->SetFlushThreshold(flushThresholdKb * BYTE_PER_KB);
    }
}

void FlowController::GenerateRawDataFileNames(const std::string& prefix)
{
    if (prefix.size() > 0) {
        for (int i = 0; i < platformCpuNum_; i++) {
            std::string path = prefix + std::to_string(i);
            rawDataDumpPath_.push_back(path);
        }
    }
}

void FlowController::SetupTraceReadPeriod(uint32_t tracePeriod)
{
    if (tracePeriod > 0 && tracePeriod <= MAX_TRACE_PERIOD_MS) {
        tracePeriodMs_ = tracePeriod;
    } else {
        tracePeriodMs_ = DEFAULT_TRACE_PERIOD_MS;
    }
}

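// Enable every supported event the user requested (config entries use the "type/name" format), record it
// in enabledEvents_ for later teardown, then turn tracing on.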
void FlowController::EnableTraceEvents(void)
{
    std::unordered_set<std::string> userEventSet(requestEvents_.begin(), requestEvents_.end());
    for (auto& event : supportedEvents_) {
        std::string type = event.first;
        std::string name = event.second;
        if (userEventSet.count(type + "/" + name)) { // user config format
            if (FtraceFsOps::GetInstance().EnableEvent(type, name)) {
                FtraceFsOps::GetInstance().AppendSetEvent(type, name);
                enabledEvents_.push_back(event);
            }
        }
    }
    FtraceFsOps::GetInstance().EnableTracing();
}

void FlowController::DisableTraceEvents(void)
{
    FtraceFsOps::GetInstance().DisableTracing();
    for (auto& event : enabledEvents_) {
        std::string type = event.first;
        std::string name = event.second;
        FtraceFsOps::GetInstance().DisableEvent(type, name);
    }
    enabledEvents_.clear();
}
FTRACE_NS_END