/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ETMDecoder.h"

#include <sstream>

#include <android-base/expected.h>
#include <android-base/logging.h>
#include <android-base/strings.h>
#include <llvm/Support/MemoryBuffer.h>
#include <opencsd.h>

#include "ETMConstants.h"

namespace simpleperf {
namespace {

class DecoderLogStr : public ocsdMsgLogStrOutI {
 public:
  void printOutStr(const std::string& out_str) override { LOG(DEBUG) << out_str; }
};

class DecodeErrorLogger : public ocsdDefaultErrorLogger {
 public:
  DecodeErrorLogger(const std::function<void(const ocsdError&)>& error_callback)
      : error_callback_(error_callback) {
    initErrorLogger(OCSD_ERR_SEV_INFO, false);
    msg_logger_.setLogOpts(ocsdMsgLogger::OUT_STR_CB);
    msg_logger_.setStrOutFn(&log_str_);
    setOutputLogger(&msg_logger_);
  }

  void LogError(const ocsd_hndl_err_log_t handle, const ocsdError* error) override {
    ocsdDefaultErrorLogger::LogError(handle, error);
    if (error != nullptr) {
      error_callback_(*error);
    }
  }

 private:
  std::function<void(const ocsdError&)> error_callback_;
  DecoderLogStr log_str_;
  ocsdMsgLogger msg_logger_;
};

static bool IsRespError(ocsd_datapath_resp_t resp) {
  return resp >= OCSD_RESP_ERR_CONT;
}
// Used instead of OpenCSD's DecodeTree to avoid linking in decoders other than the ETMV4
// instruction trace decoder.
class ETMV4IDecodeTree {
 public:
  ETMV4IDecodeTree()
      : error_logger_(std::bind(&ETMV4IDecodeTree::ProcessError, this, std::placeholders::_1)) {
    ocsd_err_t err = frame_decoder_.Init();
    CHECK_EQ(err, OCSD_OK);
    err = frame_decoder_.Configure(OCSD_DFRMTR_FRAME_MEM_ALIGN);
    CHECK_EQ(err, OCSD_OK);
    frame_decoder_.getErrLogAttachPt()->attach(&error_logger_);
  }

  bool CreateDecoder(const EtmV4Config* config) {
    uint8_t trace_id = config->getTraceID();
    auto packet_decoder = std::make_unique<TrcPktProcEtmV4I>(trace_id);
    packet_decoder->setProtocolConfig(config);
    packet_decoder->getErrorLogAttachPt()->replace_first(&error_logger_);
    frame_decoder_.getIDStreamAttachPt(trace_id)->attach(packet_decoder.get());
    auto result = packet_decoders_.emplace(trace_id, packet_decoder.release());
    if (!result.second) {
      LOG(ERROR) << "trace id " << trace_id << " has been used";
    }
    return result.second;
  }

  void AttachPacketSink(uint8_t trace_id, IPktDataIn<EtmV4ITrcPacket>& packet_sink) {
    auto& packet_decoder = packet_decoders_[trace_id];
    CHECK(packet_decoder);
    packet_decoder->getPacketOutAttachPt()->replace_first(&packet_sink);
  }

  void AttachPacketMonitor(uint8_t trace_id, IPktRawDataMon<EtmV4ITrcPacket>& packet_monitor) {
    auto& packet_decoder = packet_decoders_[trace_id];
    CHECK(packet_decoder);
    packet_decoder->getRawPacketMonAttachPt()->replace_first(&packet_monitor);
  }

  void AttachRawFramePrinter(RawFramePrinter& frame_printer) {
    frame_decoder_.Configure(frame_decoder_.getConfigFlags() | OCSD_DFRMTR_PACKED_RAW_OUT);
    frame_decoder_.getTrcRawFrameAttachPt()->replace_first(&frame_printer);
  }

  ITrcDataIn& GetFormattedDataIn() { return frame_decoder_; }

  ITrcDataIn& GetUnformattedDataIn(uint8_t trace_id) {
    auto& decoder = packet_decoders_[trace_id];
    CHECK(decoder);
    return *decoder;
  }

  void ProcessError(const ocsdError& error) {
    if (error.getErrorCode() == OCSD_ERR_INVALID_PCKT_HDR) {
      // We found an invalid packet header, and the following packets for this trace id may also
      // be invalid. So reset the packet decoder to search for an I_ASYNC packet in the data
      // stream.
      if (auto it = packet_decoders_.find(error.getErrorChanID()); it != packet_decoders_.end()) {
        auto& packet_decoder = it->second;
        CHECK(packet_decoder);
        packet_decoder->TraceDataIn(OCSD_OP_RESET, error.getErrorIndex(), 0, nullptr, nullptr);
      }
    }
  }

  DecodeErrorLogger& ErrorLogger() { return error_logger_; }

 private:
  DecodeErrorLogger error_logger_;
  TraceFormatterFrameDecoder frame_decoder_;
  std::unordered_map<uint8_t, std::unique_ptr<TrcPktProcEtmV4I>> packet_decoders_;
};

// Similar to IPktDataIn<EtmV4ITrcPacket>, but adds the trace id.
struct PacketCallback {
  // Packet callbacks are called in priority order.
  enum Priority {
    MAP_LOCATOR,
    BRANCH_LIST_PARSER,
    PACKET_TO_ELEMENT,
  };

  PacketCallback(Priority prio) : priority(prio) {}
  virtual ~PacketCallback() {}
  virtual ocsd_datapath_resp_t ProcessPacket(uint8_t trace_id, ocsd_datapath_op_t op,
                                             ocsd_trc_index_t index_sop,
                                             const EtmV4ITrcPacket* pkt) = 0;
  const Priority priority;
};

// Receives packets from a packet decoder in the OpenCSD library.
class PacketSink : public IPktDataIn<EtmV4ITrcPacket> {
 public:
  PacketSink(uint8_t trace_id) : trace_id_(trace_id) {}

  void AddCallback(PacketCallback* callback) {
    auto it = std::lower_bound(callbacks_.begin(), callbacks_.end(), callback,
                               [](const PacketCallback* c1, const PacketCallback* c2) {
                                 return c1->priority < c2->priority;
                               });
    callbacks_.insert(it, callback);
  }

  ocsd_datapath_resp_t PacketDataIn(ocsd_datapath_op_t op, ocsd_trc_index_t index_sop,
                                    const EtmV4ITrcPacket* pkt) override {
    for (auto& callback : callbacks_) {
      auto resp = callback->ProcessPacket(trace_id_, op, index_sop, pkt);
      if (IsRespError(resp)) {
        return resp;
      }
    }
    return OCSD_RESP_CONT;
  }

 private:
  uint8_t trace_id_;
  std::vector<PacketCallback*> callbacks_;
};

// For each trace_id, map an address to the thread and map it belongs to.
class MapLocator : public PacketCallback {
 public:
  MapLocator(ETMThreadTree& thread_tree)
      : PacketCallback(PacketCallback::MAP_LOCATOR), thread_tree_(thread_tree) {}

  // Return the current thread id for a trace_id, or -1 if not available.
  pid_t GetTid(uint8_t trace_id) const { return trace_data_[trace_id].tid; }

  ocsd_datapath_resp_t ProcessPacket(uint8_t trace_id, ocsd_datapath_op_t op,
                                     ocsd_trc_index_t index_sop,
                                     const EtmV4ITrcPacket* pkt) override {
    TraceData& data = trace_data_[trace_id];
    if (op == OCSD_OP_DATA) {
      if (pkt != nullptr && ((!data.use_vmid && pkt->getContext().updated_c) ||
                             (data.use_vmid && pkt->getContext().updated_v))) {
        int32_t new_tid =
            static_cast<int32_t>(data.use_vmid ? pkt->getContext().VMID : pkt->getContext().ctxtID);
        if (data.tid != new_tid) {
          data.tid = new_tid;
          data.thread = nullptr;
          data.userspace_map = nullptr;
        }
      }
    } else if (op == OCSD_OP_RESET) {
      data.tid = -1;
      data.thread = nullptr;
      data.userspace_map = nullptr;
    }
    return OCSD_RESP_CONT;
  }

  const MapEntry* FindMap(uint8_t trace_id, uint64_t addr) {
    TraceData& data = trace_data_[trace_id];
    if (data.userspace_map != nullptr && data.userspace_map->Contains(addr)) {
      return data.userspace_map;
    }
    if (data.tid == -1) {
      return nullptr;
    }
    if (data.thread == nullptr) {
      data.thread = thread_tree_.FindThread(data.tid);
      if (data.thread == nullptr) {
        return nullptr;
      }
    }
    data.userspace_map = data.thread->maps->FindMapByAddr(addr);
    if (data.userspace_map != nullptr) {
      return data.userspace_map;
    }
    // We don't cache the kernel map, because the kernel map can start from 0 and overlap all
    // userspace maps.
    return thread_tree_.GetKernelMaps().FindMapByAddr(addr);
  }

  void SetUseVmid(uint8_t trace_id, bool value) { trace_data_[trace_id].use_vmid = value; }

 private:
  struct TraceData {
    int32_t tid = -1;  // thread id, -1 if invalid
    const ThreadEntry* thread = nullptr;
    const MapEntry* userspace_map = nullptr;
    bool use_vmid = false;  // use vmid for PID
  };

  ETMThreadTree& thread_tree_;
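  // Per-trace-id decoding state, indexed by the ETM trace id (a uint8_t).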
  TraceData trace_data_[256];
};

// Map (trace_id, ip address) to (binary_path, binary_offset), and read binary files.
class MemAccess : public ITargetMemAccess {
 public:
  MemAccess(MapLocator& map_locator) : map_locator_(map_locator) {}

  ocsd_err_t ReadTargetMemory(const ocsd_vaddr_t address, uint8_t trace_id, ocsd_mem_space_acc_t,
                              uint32_t* num_bytes, uint8_t* p_buffer) override {
    TraceData& data = trace_data_[trace_id];
    const MapEntry* map = map_locator_.FindMap(trace_id, address);
    // fast path
    if (map != nullptr && map == data.buffer_map && address >= data.buffer_start &&
        address + *num_bytes <= data.buffer_end) {
      if (data.buffer == nullptr) {
        *num_bytes = 0;
      } else {
        memcpy(p_buffer, data.buffer + (address - data.buffer_start), *num_bytes);
      }
      return OCSD_OK;
    }

    // slow path
    size_t copy_size = 0;
    if (map != nullptr) {
      llvm::MemoryBuffer* memory = GetMemoryBuffer(map->dso);
      if (memory != nullptr) {
        if (auto opt_offset = map->dso->IpToFileOffset(address, map->start_addr, map->pgoff);
            opt_offset) {
          uint64_t offset = opt_offset.value();
          size_t file_size = memory->getBufferSize();
          copy_size = file_size > offset ? std::min<size_t>(file_size - offset, *num_bytes) : 0;
          if (copy_size > 0) {
            memcpy(p_buffer, memory->getBufferStart() + offset, copy_size);
          }
        }
      }
      // Update the last buffer cache. Don't cache the kernel map, because simpleperf doesn't
      // record an accurate kernel end addr.
      if (!map->in_kernel) {
        data.buffer_map = map;
        data.buffer_start = map->start_addr;
        data.buffer_end = map->get_end_addr();
        if (memory != nullptr && memory->getBufferSize() > map->pgoff &&
            (memory->getBufferSize() - map->pgoff >= map->len)) {
          data.buffer = memory->getBufferStart() + map->pgoff;
        } else {
          data.buffer = nullptr;
        }
      }
    }
    *num_bytes = copy_size;
    return OCSD_OK;
  }

  void InvalidateMemAccCache(const uint8_t cs_trace_id) override {}

 private:
  llvm::MemoryBuffer* GetMemoryBuffer(Dso* dso) {
    auto it = elf_map_.find(dso);
    if (it == elf_map_.end()) {
      ElfStatus status;
      auto res = elf_map_.emplace(dso, ElfFile::Open(dso->GetDebugFilePath(), &status));
      it = res.first;
    }
    return it->second ? it->second->GetMemoryBuffer() : nullptr;
  }

  struct TraceData {
    const MapEntry* buffer_map = nullptr;
    const char* buffer = nullptr;
    uint64_t buffer_start = 0;
    uint64_t buffer_end = 0;
  };

  MapLocator& map_locator_;
  std::unordered_map<Dso*, std::unique_ptr<ElfFile>> elf_map_;
  TraceData trace_data_[256];
};

class InstructionDecoder : public TrcIDecode {
 public:
  ocsd_err_t DecodeInstruction(ocsd_instr_info* instr_info) {
    this->instr_info = instr_info;
    return TrcIDecode::DecodeInstruction(instr_info);
  }

  ocsd_instr_info* instr_info;
};

// Similar to ITrcGenElemIn, but adds the next instruction info, which is needed to get the
// branch-to address for an InstructionRange element.
struct ElementCallback {
 public:
  virtual ~ElementCallback() {}
  virtual ocsd_datapath_resp_t ProcessElement(ocsd_trc_index_t index_sop, uint8_t trace_id,
                                              const OcsdTraceElement& elem,
                                              const ocsd_instr_info* next_instr) = 0;
};

// Decode packets into elements.
class PacketToElement : public PacketCallback, public ITrcGenElemIn {
 public:
  PacketToElement(MapLocator& map_locator,
                  const std::unordered_map<uint8_t, std::unique_ptr<EtmV4Config>>& configs,
                  DecodeErrorLogger& error_logger)
      : PacketCallback(PacketCallback::PACKET_TO_ELEMENT), mem_access_(map_locator) {
    for (auto& p : configs) {
      uint8_t trace_id = p.first;
      const EtmV4Config* config = p.second.get();
      element_decoders_.emplace(trace_id, trace_id);
      auto& decoder = element_decoders_[trace_id];
      decoder.setProtocolConfig(config);
      decoder.getErrorLogAttachPt()->replace_first(&error_logger);
      decoder.getInstrDecodeAttachPt()->replace_first(&instruction_decoder_);
      decoder.getMemoryAccessAttachPt()->replace_first(&mem_access_);
      decoder.getTraceElemOutAttachPt()->replace_first(this);
    }
  }

  void AddCallback(ElementCallback* callback) { callbacks_.push_back(callback); }

  ocsd_datapath_resp_t ProcessPacket(uint8_t trace_id, ocsd_datapath_op_t op,
                                     ocsd_trc_index_t index_sop,
                                     const EtmV4ITrcPacket* pkt) override {
    return element_decoders_[trace_id].PacketDataIn(op, index_sop, pkt);
  }

  ocsd_datapath_resp_t TraceElemIn(const ocsd_trc_index_t index_sop, uint8_t trc_chan_id,
                                   const OcsdTraceElement& elem) override {
    for (auto& callback : callbacks_) {
      auto resp =
          callback->ProcessElement(index_sop, trc_chan_id, elem, instruction_decoder_.instr_info);
      if (IsRespError(resp)) {
        return resp;
      }
    }
    return OCSD_RESP_CONT;
  }

 private:
  // map from the trace id of an etm device to its element decoder
  std::unordered_map<uint8_t, TrcPktDecodeEtmV4I> element_decoders_;
  MemAccess mem_access_;
  InstructionDecoder instruction_decoder_;
  std::vector<ElementCallback*> callbacks_;
};

// Dump etm data generated at different stages.
class DataDumper : public ElementCallback {
 public:
  DataDumper(ETMV4IDecodeTree& decode_tree) : decode_tree_(decode_tree) {}

  void DumpRawData() {
    decode_tree_.AttachRawFramePrinter(frame_printer_);
    frame_printer_.setMessageLogger(&stdout_logger_);
  }

  void DumpPackets(const std::unordered_map<uint8_t, std::unique_ptr<EtmV4Config>>& configs) {
    for (auto& p : configs) {
      uint8_t trace_id = p.first;
      auto result = packet_printers_.emplace(trace_id, trace_id);
      CHECK(result.second);
      auto& packet_printer = result.first->second;
      decode_tree_.AttachPacketMonitor(trace_id, packet_printer);
      packet_printer.setMessageLogger(&stdout_logger_);
    }
  }

  void DumpElements() { element_printer_.setMessageLogger(&stdout_logger_); }

  ocsd_datapath_resp_t ProcessElement(ocsd_trc_index_t index_sop, uint8_t trc_chan_id,
                                      const OcsdTraceElement& elem, const ocsd_instr_info*) {
    return element_printer_.TraceElemIn(index_sop, trc_chan_id, elem);
  }

 private:
  ETMV4IDecodeTree& decode_tree_;
  RawFramePrinter frame_printer_;
  std::unordered_map<uint8_t, PacketPrinter<EtmV4ITrcPacket>> packet_printers_;
  TrcGenericElementPrinter element_printer_;
  ocsdMsgLogger stdout_logger_;
};

// It decodes each ETMV4IPacket into TraceElements, and generates ETMInstrRanges from
// TraceElements. Decoding each packet is slow, but ensures correctness.
class InstrRangeParser : public ElementCallback {
 private:
  struct TraceData {
    ETMInstrRange instr_range;
    bool wait_for_branch_to_addr_fix = false;
  };

 public:
  InstrRangeParser(MapLocator& map_locator, const ETMDecoder::InstrRangeCallbackFn& callback)
      : map_locator_(map_locator), callback_(callback) {}

  ocsd_datapath_resp_t ProcessElement(const ocsd_trc_index_t, uint8_t trace_id,
                                      const OcsdTraceElement& elem,
                                      const ocsd_instr_info* next_instr) override {
    if (elem.getType() == OCSD_GEN_TRC_ELEM_INSTR_RANGE) {
      TraceData& data = trace_data_[trace_id];
      const MapEntry* map = map_locator_.FindMap(trace_id, elem.st_addr);
      if (map == nullptr) {
        FlushData(data);
        return OCSD_RESP_CONT;
      }
      uint64_t start_addr = map->GetVaddrInFile(elem.st_addr);
      auto& instr_range = data.instr_range;

      if (data.wait_for_branch_to_addr_fix) {
        // OpenCSD may cache a list of InstrRange elements, which makes the branch-to address
        // taken from next_instr->branch_addr inaccurate. So fix it by using the start address of
        // the next InstrRange element.
        instr_range.branch_to_addr = start_addr;
      }
      FlushData(data);
      instr_range.dso = map->dso;
      instr_range.start_addr = start_addr;
      instr_range.end_addr = map->GetVaddrInFile(elem.en_addr - elem.last_instr_sz);
      bool end_with_branch =
          elem.last_i_type == OCSD_INSTR_BR || elem.last_i_type == OCSD_INSTR_BR_INDIRECT;
      bool branch_taken = end_with_branch && elem.last_instr_exec;
      if (elem.last_i_type == OCSD_INSTR_BR && branch_taken) {
        // This is based on the assumption that immediate branches only jump within a binary,
        // which may not be true for all cases. TODO: http://b/151665001.
        instr_range.branch_to_addr = map->GetVaddrInFile(next_instr->branch_addr);
        data.wait_for_branch_to_addr_fix = true;
      } else {
        instr_range.branch_to_addr = 0;
      }
      instr_range.branch_taken_count = branch_taken ? 1 : 0;
      instr_range.branch_not_taken_count = branch_taken ? 0 : 1;

    } else if (elem.getType() == OCSD_GEN_TRC_ELEM_TRACE_ON) {
      // According to the ETM specification, the Trace On element indicates a discontinuity in
      // the instruction trace stream. So it cuts the connection between instr ranges.
      FlushData(trace_data_[trace_id]);
    }
    return OCSD_RESP_CONT;
  }

  void FinishData() {
    for (auto& pair : trace_data_) {
      FlushData(pair.second);
    }
  }

 private:
  void FlushData(TraceData& data) {
    if (data.instr_range.dso != nullptr) {
      callback_(data.instr_range);
      data.instr_range.dso = nullptr;
    }
    data.wait_for_branch_to_addr_fix = false;
  }

  MapLocator& map_locator_;
  std::unordered_map<uint8_t, TraceData> trace_data_;
  ETMDecoder::InstrRangeCallbackFn callback_;
};

// It parses ETMBranchLists from ETMV4IPackets.
// It doesn't do element decoding or instruction decoding, and is therefore about 5 times faster
// than InstrRangeParser. But some data is lost when converting ETMBranchLists to InstrRanges:
// 1. InstrRanges described by Except packets (the last instructions executed before an
//    exception, about 2%?).
// 2. Branch-to addresses of direct branch instructions across binaries.
class BranchListParser : public PacketCallback {
 private:
  struct TraceData {
    uint64_t addr = 0;
    uint8_t addr_valid_bits = 0;
    uint8_t isa = 0;
    bool invalid_branch = false;
    ETMBranchList branch;
  };

 public:
  BranchListParser(MapLocator& map_locator, const ETMDecoder::BranchListCallbackFn& callback)
      : PacketCallback(BRANCH_LIST_PARSER), map_locator_(map_locator), callback_(callback) {}

  void CheckConfigs(std::unordered_map<uint8_t, std::unique_ptr<EtmV4Config>>& configs) {
    // TODO: The current implementation doesn't support non-zero speculation length or the
    // return stack.
    for (auto& p : configs) {
      if (p.second->MaxSpecDepth() > 0) {
        LOG(WARNING) << "branch list collection isn't accurate with non-zero speculation length";
        break;
      }
    }
    for (auto& p : configs) {
      if (p.second->enabledRetStack()) {
        LOG(WARNING) << "branch list collection will lose some data with return stack enabled";
        break;
      }
    }
  }

  bool IsAddrPacket(const EtmV4ITrcPacket* pkt) {
    return pkt->getType() >= ETM4_PKT_I_ADDR_CTXT_L_32IS0 &&
           pkt->getType() <= ETM4_PKT_I_ADDR_L_64IS1;
  }

  bool IsAtomPacket(const EtmV4ITrcPacket* pkt) { return pkt->getAtom().num > 0; }

  ocsd_datapath_resp_t ProcessPacket(uint8_t trace_id, ocsd_datapath_op_t op,
                                     ocsd_trc_index_t /*index_sop*/,
                                     const EtmV4ITrcPacket* pkt) override {
    TraceData& data = trace_data_[trace_id];
    if (op == OCSD_OP_DATA) {
      if (IsAddrPacket(pkt)) {
        // Flush the branch when seeing an Addr packet, because it isn't correct to concatenate
        // branches before and after an Addr packet.
        FlushBranch(data);
        data.addr = pkt->getAddrVal();
        data.addr_valid_bits = pkt->v_addr.valid_bits;
        data.isa = pkt->getAddrIS();
      }

      if (IsAtomPacket(pkt)) {
        // An atom packet contains a branch list. We may receive one or more atom packets in a
        // row, and need to concatenate them.
        ProcessAtomPacket(trace_id, data, pkt);
      }

    } else {
      // Flush the branch when seeing a flush or reset operation.
      FlushBranch(data);
      if (op == OCSD_OP_RESET) {
        data.addr = 0;
        data.addr_valid_bits = 0;
        data.isa = 0;
        data.invalid_branch = false;
      }
    }
    return OCSD_RESP_CONT;
  }

  void FinishData() {
    for (auto& pair : trace_data_) {
      FlushBranch(pair.second);
    }
  }

 private:
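  // Append the atom packet's taken/not-taken bits to the current branch list. The En_bits field
  // is consumed LSB first: e.g. (illustrative) num = 3 and En_bits = 0b101 append
  // {taken, not-taken, taken}.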
  void ProcessAtomPacket(uint8_t trace_id, TraceData& data, const EtmV4ITrcPacket* pkt) {
    if (data.invalid_branch) {
      return;  // Skip atom packets when we think a branch list is invalid.
    }
    if (data.branch.branch.empty()) {
      // This is the first atom packet in a branch list. Check if we have tid and addr info to
      // parse it and the following atom packets. If not, mark the branch list as invalid.
      if (map_locator_.GetTid(trace_id) == -1 || data.addr_valid_bits == 0) {
        data.invalid_branch = true;
        return;
      }
      const MapEntry* map = map_locator_.FindMap(trace_id, data.addr);
      if (map == nullptr) {
        data.invalid_branch = true;
        return;
      }
      data.branch.dso = map->dso;
      data.branch.addr = map->GetVaddrInFile(data.addr);
      if (data.isa == 1) {  // thumb instruction, mark it in bit 0.
        data.branch.addr |= 1;
      }
    }
    uint32_t bits = pkt->atom.En_bits;
    for (size_t i = 0; i < pkt->atom.num; i++) {
      data.branch.branch.push_back((bits & 1) == 1);
      bits >>= 1;
    }
  }

  void FlushBranch(TraceData& data) {
    if (!data.branch.branch.empty()) {
      callback_(data.branch);
      data.branch.branch.clear();
    }
    data.invalid_branch = false;
  }

  MapLocator& map_locator_;
  ETMDecoder::BranchListCallbackFn callback_;
  std::unordered_map<uint8_t, TraceData> trace_data_;
};

// ETM data decoding in the OpenCSD library has two steps:
// 1. From byte stream to etm packets. Each packet describes an event. For example, an Address
//    packet shows that the cpu is running the instruction at that address, and an Atom packet
//    shows whether the cpu took a branch or not.
// 2. From etm packets to trace elements. To generate elements, the decoder needs both etm
//    packets and the executed binaries. For example, an InstructionRange element needs the
//    decoder to find the next branch instruction starting from an address.
//
// ETMDecoderImpl uses the OpenCSD library to decode etm data. It has the following properties:
// 1. Supports a flexible decoding strategy. It allows installing packet callbacks and element
//    callbacks, and decodes to either packets or elements based on requirements.
// 2. Supports dumping data at different stages.
class ETMDecoderImpl : public ETMDecoder {
 public:
  ETMDecoderImpl(ETMThreadTree& thread_tree) : thread_tree_(thread_tree) {
    // If the aux record for a thread is processed after its thread exit record, we can't find
    // the thread's maps when processing ETM data. To handle this, disable thread exit records.
    thread_tree.DisableThreadExitRecords();
  }

  void CreateDecodeTree(const AuxTraceInfoRecord& auxtrace_info) {
    uint8_t trace_id = 0;
    uint64_t* info = auxtrace_info.data->info;
    for (int i = 0; i < auxtrace_info.data->nr_cpu; i++) {
      if (info[0] == AuxTraceInfoRecord::MAGIC_ETM4) {
        auto& etm4 = *reinterpret_cast<AuxTraceInfoRecord::ETM4Info*>(info);
        ocsd_etmv4_cfg cfg;
        memset(&cfg, 0, sizeof(cfg));
        cfg.reg_idr0 = etm4.trcidr0;
        cfg.reg_idr1 = etm4.trcidr1;
        cfg.reg_idr2 = etm4.trcidr2;
        cfg.reg_idr8 = etm4.trcidr8;
        cfg.reg_configr = etm4.trcconfigr;
        cfg.reg_traceidr = etm4.trctraceidr;
        cfg.arch_ver = ARCH_V8;
        cfg.core_prof = profile_CortexA;
        trace_id = cfg.reg_traceidr & 0x7f;
        trace_ids_.emplace(etm4.cpu, trace_id);
        configs_.emplace(trace_id, new EtmV4Config(&cfg));
        info = reinterpret_cast<uint64_t*>(&etm4 + 1);
      } else {
        CHECK_EQ(info[0], AuxTraceInfoRecord::MAGIC_ETE);
        auto& ete = *reinterpret_cast<AuxTraceInfoRecord::ETEInfo*>(info);
        ocsd_ete_cfg cfg;
        memset(&cfg, 0, sizeof(cfg));
        cfg.reg_idr0 = ete.trcidr0;
        cfg.reg_idr1 = ete.trcidr1;
        cfg.reg_idr2 = ete.trcidr2;
        cfg.reg_idr8 = ete.trcidr8;
        cfg.reg_devarch = ete.trcdevarch;
        cfg.reg_configr = ete.trcconfigr;
        cfg.reg_traceidr = ete.trctraceidr;
        cfg.arch_ver = ARCH_AA64;
        cfg.core_prof = profile_CortexA;
        trace_id = cfg.reg_traceidr & 0x7f;
        trace_ids_.emplace(ete.cpu, trace_id);
        configs_.emplace(trace_id, new ETEConfig(&cfg));
        info = reinterpret_cast<uint64_t*>(&ete + 1);
      }
      decode_tree_.CreateDecoder(configs_[trace_id].get());
      auto result = packet_sinks_.emplace(trace_id, trace_id);
      CHECK(result.second);
      decode_tree_.AttachPacketSink(trace_id, result.first->second);
    }
  }

  void EnableDump(const ETMDumpOption& option) override {
    dumper_.reset(new DataDumper(decode_tree_));
    if (option.dump_raw_data) {
      dumper_->DumpRawData();
    }
    if (option.dump_packets) {
      dumper_->DumpPackets(configs_);
    }
    if (option.dump_elements) {
      dumper_->DumpElements();
      InstallElementCallback(dumper_.get());
    }
  }

  void RegisterCallback(const InstrRangeCallbackFn& callback) {
    InstallMapLocator();
    instr_range_parser_.reset(new InstrRangeParser(*map_locator_, callback));
    InstallElementCallback(instr_range_parser_.get());
  }

  void RegisterCallback(const BranchListCallbackFn& callback) {
    InstallMapLocator();
    branch_list_parser_.reset(new BranchListParser(*map_locator_, callback));
    branch_list_parser_->CheckConfigs(configs_);
    InstallPacketCallback(branch_list_parser_.get());
  }

  bool ProcessData(const uint8_t* data, size_t size, bool formatted, uint32_t cpu) override {
    // Reset decoders before processing each data block, because:
    // 1. Data blocks are not continuous. So decoders shouldn't keep previous states when
    //    processing a new block.
    // 2. The beginning part of a data block may be truncated if the kernel buffer is temporarily
    //    full. So we may see garbage data, which can cause decoding errors if we don't reset
    //    decoders.
    LOG(DEBUG) << "Processing " << (!formatted ? "un" : "") << "formatted data with size " << size;
    auto& decoder = formatted ? decode_tree_.GetFormattedDataIn()
                              : decode_tree_.GetUnformattedDataIn(trace_ids_[cpu]);

    auto resp = decoder.TraceDataIn(OCSD_OP_RESET, data_index_, 0, nullptr, nullptr);
    if (IsRespError(resp)) {
      LOG(ERROR) << "failed to reset decoder, resp " << resp;
      return false;
    }
    size_t left_size = size;
    const size_t MAX_RESET_RETRY_COUNT = 3;
    size_t reset_retry_count = 0;
    while (left_size > 0) {
      uint32_t processed;
      auto resp = decoder.TraceDataIn(OCSD_OP_DATA, data_index_, left_size, data, &processed);
      if (IsRespError(resp)) {
        // A decoding error shouldn't ruin all the data. Reset decoders to recover from it.
        // But some errors may not be recoverable by resetting decoders. So use a max retry
        // limit.
        if (++reset_retry_count > MAX_RESET_RETRY_COUNT) {
          break;
        }
        LOG(DEBUG) << "reset etm decoders after seeing a decode failure, resp " << resp
                   << ", reset_retry_count is " << reset_retry_count;
        decoder.TraceDataIn(OCSD_OP_RESET, data_index_ + processed, 0, nullptr, nullptr);
      }
      data += processed;
      left_size -= processed;
      data_index_ += processed;
    }
    return true;
  }

  bool FinishData() override {
    if (instr_range_parser_) {
      instr_range_parser_->FinishData();
    }
    if (branch_list_parser_) {
      branch_list_parser_->FinishData();
    }
    return true;
  }

 private:
  void InstallMapLocator() {
    if (!map_locator_) {
      map_locator_.reset(new MapLocator(thread_tree_));
      for (auto& cfg : configs_) {
        int64_t configr = (*(const ocsd_etmv4_cfg*)*cfg.second).reg_configr;
        map_locator_->SetUseVmid(cfg.first,
                                 configr & (1U << ETM4_CFG_BIT_VMID | 1U << ETM4_CFG_BIT_VMID_OPT));
      }

      InstallPacketCallback(map_locator_.get());
    }
  }

  void InstallPacketCallback(PacketCallback* callback) {
    for (auto& p : packet_sinks_) {
      p.second.AddCallback(callback);
    }
  }

  void InstallElementCallback(ElementCallback* callback) {
    if (!packet_to_element_) {
      InstallMapLocator();
      packet_to_element_.reset(
          new PacketToElement(*map_locator_, configs_, decode_tree_.ErrorLogger()));
      InstallPacketCallback(packet_to_element_.get());
    }
    packet_to_element_->AddCallback(callback);
  }

  // map ip address to binary path and binary offset
  ETMThreadTree& thread_tree_;
  // handle to build the OpenCSD decoder
  ETMV4IDecodeTree decode_tree_;
  // map from cpu to trace id
  std::unordered_map<uint64_t, uint8_t> trace_ids_;
  // map from the trace id of an etm device to its config
  std::unordered_map<uint8_t, std::unique_ptr<EtmV4Config>> configs_;
  // map from the trace id of an etm device to its PacketSink
  std::unordered_map<uint8_t, PacketSink> packet_sinks_;
  std::unique_ptr<PacketToElement> packet_to_element_;
  std::unique_ptr<DataDumper> dumper_;
  // an index tracking the amount of etm data processed so far
  size_t data_index_ = 0;
  std::unique_ptr<InstrRangeParser> instr_range_parser_;
  std::unique_ptr<MapLocator> map_locator_;
  std::unique_ptr<BranchListParser> branch_list_parser_;
};

}  // namespace

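// For example (illustrative), ParseEtmDumpOption("raw,packet", &option) enables dumping of raw
// frames and decoded packets, but not elements.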
bool ParseEtmDumpOption(const std::string& s, ETMDumpOption* option) {
  for (auto& value : android::base::Split(s, ",")) {
    if (value == "raw") {
      option->dump_raw_data = true;
    } else if (value == "packet") {
      option->dump_packets = true;
    } else if (value == "element") {
      option->dump_elements = true;
    } else {
      LOG(ERROR) << "unknown etm dump option: " << value;
      return false;
    }
  }
  return true;
}

std::unique_ptr<ETMDecoder> ETMDecoder::Create(const AuxTraceInfoRecord& auxtrace_info,
                                               ETMThreadTree& thread_tree) {
  auto decoder = std::make_unique<ETMDecoderImpl>(thread_tree);
  decoder->CreateDecodeTree(auxtrace_info);
  return std::unique_ptr<ETMDecoder>(decoder.release());
}
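
// Illustrative usage sketch (not called here; the real record-feeding logic lives in the
// callers of ETMDecoder, outside this file):
//   auto decoder = ETMDecoder::Create(auxtrace_info, thread_tree);
//   decoder->RegisterCallback(
//       [](const ETMInstrRange& range) { /* aggregate or emit the instruction range */ });
//   // For each recorded ETM aux data block:
//   decoder->ProcessData(block_data, block_size, formatted, cpu);
//   decoder->FinishData();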

// Uses the OpenCSD instruction decoder to convert branches to instruction addresses.
class BranchDecoder {
 public:
  android::base::expected<void, std::string> Init(Dso* dso) {
    ElfStatus status;
    elf_ = ElfFile::Open(dso->GetDebugFilePath(), &status);
    if (!elf_) {
      std::stringstream ss;
      ss << status;
      return android::base::unexpected(ss.str());
    }
    if (dso->type() == DSO_KERNEL_MODULE) {
      // A kernel module doesn't have a program header. So create a fake one mapping to the
      // .text section.
      for (const auto& section : elf_->GetSectionHeader()) {
        if (section.name == ".text") {
          segments_.resize(1);
          segments_[0].is_executable = true;
          segments_[0].is_load = true;
          segments_[0].file_offset = section.file_offset;
          segments_[0].file_size = section.size;
          segments_[0].vaddr = section.vaddr;
          break;
        }
      }
    } else {
      segments_ = elf_->GetProgramHeader();
      auto it = std::remove_if(segments_.begin(), segments_.end(),
                               [](const ElfSegment& s) { return !s.is_executable; });
      segments_.resize(it - segments_.begin());
    }
    if (segments_.empty()) {
      return android::base::unexpected("no segments");
    }
    buffer_ = elf_->GetMemoryBuffer();
    return {};
  }

  void SetAddr(uint64_t addr, bool is_thumb) {
    memset(&instr_info_, 0, sizeof(instr_info_));
    instr_info_.pe_type.arch = ARCH_V8;
    instr_info_.pe_type.profile = profile_CortexA;
    instr_info_.isa =
        elf_->Is64Bit() ? ocsd_isa_aarch64 : (is_thumb ? ocsd_isa_thumb2 : ocsd_isa_arm);
    instr_info_.instr_addr = addr;
  }

  bool FindNextBranch() {
    // Loop until we find a branch instruction.
    while (ReadMem(instr_info_.instr_addr, 4, &instr_info_.opcode)) {
      ocsd_err_t err = instruction_decoder_.DecodeInstruction(&instr_info_);
      if (err != OCSD_OK) {
        break;
      }
      if (instr_info_.type != OCSD_INSTR_OTHER) {
        return true;
      }
      instr_info_.instr_addr += instr_info_.instr_size;
    }
    return false;
  }

  ocsd_instr_info& InstrInfo() { return instr_info_; }

 private:
  bool ReadMem(uint64_t vaddr, size_t size, void* data) {
    for (auto& segment : segments_) {
      if (vaddr >= segment.vaddr && vaddr + size <= segment.vaddr + segment.file_size) {
        uint64_t offset = vaddr - segment.vaddr + segment.file_offset;
        memcpy(data, buffer_->getBufferStart() + offset, size);
        return true;
      }
    }
    return false;
  }

  std::unique_ptr<ElfFile> elf_;
  std::vector<ElfSegment> segments_;
  llvm::MemoryBuffer* buffer_ = nullptr;
  ocsd_instr_info instr_info_;
  InstructionDecoder instruction_decoder_;
};

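// Each branch_map key is an instruction address with the Thumb flag stored in bit 0; the value
// maps each observed taken/not-taken branch sequence starting at that address to the number of
// times it was seen (see BranchListParser above).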
android::base::expected<void, std::string> ConvertBranchMapToInstrRanges(
    Dso* dso, const BranchMap& branch_map, const ETMDecoder::InstrRangeCallbackFn& callback) {
  ETMInstrRange instr_range;
  instr_range.dso = dso;

  BranchDecoder decoder;
  if (auto result = decoder.Init(dso); !result.ok()) {
    return result;
  }

  for (const auto& addr_p : branch_map) {
    uint64_t start_addr = addr_p.first & ~1ULL;
    bool is_thumb = addr_p.first & 1;
    for (const auto& branch_p : addr_p.second) {
      const std::vector<bool>& branch = branch_p.first;
      uint64_t count = branch_p.second;
      decoder.SetAddr(start_addr, is_thumb);

      for (bool b : branch) {
        ocsd_instr_info& instr = decoder.InstrInfo();
        uint64_t from_addr = instr.instr_addr;
        if (!decoder.FindNextBranch()) {
          break;
        }
        bool end_with_branch = instr.type == OCSD_INSTR_BR || instr.type == OCSD_INSTR_BR_INDIRECT;
        bool branch_taken = end_with_branch && b;
        instr_range.start_addr = from_addr;
        instr_range.end_addr = instr.instr_addr;
        if (instr.type == OCSD_INSTR_BR) {
          instr_range.branch_to_addr = instr.branch_addr;
        } else {
          instr_range.branch_to_addr = 0;
        }
        instr_range.branch_taken_count = branch_taken ? count : 0;
        instr_range.branch_not_taken_count = branch_taken ? 0 : count;

        callback(instr_range);

        if (b) {
          instr.instr_addr = instr.branch_addr;
        } else {
          instr.instr_addr += instr.instr_size;
        }
      }
    }
  }
  return {};
}

}  // namespace simpleperf