• Home
  • Raw
  • Download

Lines Matching +full:magic +full:- +full:packet

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015-2018 Linaro Limited.
21 #include "cs-etm.h"
22 #include "cs-etm-decoder/cs-etm-decoder.h"
36 #include "thread-stack.h"
38 #include "util/synthetic-events.h"
79 struct cs_etm_packet *packet; member
131 static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic) in cs_etm__get_magic() argument
138 return -EINVAL; in cs_etm__get_magic()
140 metadata = inode->priv; in cs_etm__get_magic()
141 *magic = metadata[CS_ETM_MAGIC]; in cs_etm__get_magic()
152 return -EINVAL; in cs_etm__get_cpu()
154 metadata = inode->priv; in cs_etm__get_cpu()
163 * Wnen a timestamp packet is encountered the backend code in cs_etm__etmq_set_traceid_queue_timestamp()
169 etmq->pending_timestamp = trace_chan_id; in cs_etm__etmq_set_traceid_queue_timestamp()
177 if (!etmq->pending_timestamp) in cs_etm__etmq_get_timestamp()
181 *trace_chan_id = etmq->pending_timestamp; in cs_etm__etmq_get_timestamp()
184 etmq->pending_timestamp); in cs_etm__etmq_get_timestamp()
189 etmq->pending_timestamp = 0; in cs_etm__etmq_get_timestamp()
192 return packet_queue->timestamp; in cs_etm__etmq_get_timestamp()
199 queue->head = 0; in cs_etm__clear_packet_queue()
200 queue->tail = 0; in cs_etm__clear_packet_queue()
201 queue->packet_count = 0; in cs_etm__clear_packet_queue()
203 queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN; in cs_etm__clear_packet_queue()
204 queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR; in cs_etm__clear_packet_queue()
205 queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR; in cs_etm__clear_packet_queue()
206 queue->packet_buffer[i].instr_count = 0; in cs_etm__clear_packet_queue()
207 queue->packet_buffer[i].last_instr_taken_branch = false; in cs_etm__clear_packet_queue()
208 queue->packet_buffer[i].last_instr_size = 0; in cs_etm__clear_packet_queue()
209 queue->packet_buffer[i].last_instr_type = 0; in cs_etm__clear_packet_queue()
210 queue->packet_buffer[i].last_instr_subtype = 0; in cs_etm__clear_packet_queue()
211 queue->packet_buffer[i].last_instr_cond = 0; in cs_etm__clear_packet_queue()
212 queue->packet_buffer[i].flags = 0; in cs_etm__clear_packet_queue()
213 queue->packet_buffer[i].exception_number = UINT32_MAX; in cs_etm__clear_packet_queue()
214 queue->packet_buffer[i].trace_chan_id = UINT8_MAX; in cs_etm__clear_packet_queue()
215 queue->packet_buffer[i].cpu = INT_MIN; in cs_etm__clear_packet_queue()
224 struct intlist *traceid_queues_list = etmq->traceid_queues_list; in cs_etm__clear_all_packet_queues()
227 idx = (int)(intptr_t)inode->priv; in cs_etm__clear_all_packet_queues()
228 tidq = etmq->traceid_queues[idx]; in cs_etm__clear_all_packet_queues()
229 cs_etm__clear_packet_queue(&tidq->packet_queue); in cs_etm__clear_all_packet_queues()
237 int rc = -ENOMEM; in cs_etm__init_traceid_queue()
239 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__init_traceid_queue()
241 cs_etm__clear_packet_queue(&tidq->packet_queue); in cs_etm__init_traceid_queue()
243 queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; in cs_etm__init_traceid_queue()
244 tidq->tid = queue->tid; in cs_etm__init_traceid_queue()
245 tidq->pid = -1; in cs_etm__init_traceid_queue()
246 tidq->trace_chan_id = trace_chan_id; in cs_etm__init_traceid_queue()
248 tidq->packet = zalloc(sizeof(struct cs_etm_packet)); in cs_etm__init_traceid_queue()
249 if (!tidq->packet) in cs_etm__init_traceid_queue()
252 tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet)); in cs_etm__init_traceid_queue()
253 if (!tidq->prev_packet) in cs_etm__init_traceid_queue()
256 if (etm->synth_opts.last_branch) { in cs_etm__init_traceid_queue()
259 sz += etm->synth_opts.last_branch_sz * in cs_etm__init_traceid_queue()
261 tidq->last_branch = zalloc(sz); in cs_etm__init_traceid_queue()
262 if (!tidq->last_branch) in cs_etm__init_traceid_queue()
264 tidq->last_branch_rb = zalloc(sz); in cs_etm__init_traceid_queue()
265 if (!tidq->last_branch_rb) in cs_etm__init_traceid_queue()
269 tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); in cs_etm__init_traceid_queue()
270 if (!tidq->event_buf) in cs_etm__init_traceid_queue()
276 zfree(&tidq->last_branch_rb); in cs_etm__init_traceid_queue()
277 zfree(&tidq->last_branch); in cs_etm__init_traceid_queue()
278 zfree(&tidq->prev_packet); in cs_etm__init_traceid_queue()
279 zfree(&tidq->packet); in cs_etm__init_traceid_queue()
291 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__etmq_get_traceid_queue()
293 if (etm->timeless_decoding) in cs_etm__etmq_get_traceid_queue()
296 traceid_queues_list = etmq->traceid_queues_list; in cs_etm__etmq_get_traceid_queue()
304 idx = (int)(intptr_t)inode->priv; in cs_etm__etmq_get_traceid_queue()
305 return etmq->traceid_queues[idx]; in cs_etm__etmq_get_traceid_queue()
323 inode->priv = (void *)(intptr_t)idx; in cs_etm__etmq_get_traceid_queue()
329 traceid_queues = etmq->traceid_queues; in cs_etm__etmq_get_traceid_queue()
342 etmq->traceid_queues = traceid_queues; in cs_etm__etmq_get_traceid_queue()
344 return etmq->traceid_queues[idx]; in cs_etm__etmq_get_traceid_queue()
364 return &tidq->packet_queue; in cs_etm__etmq_get_packet_queue()
374 if (etm->sample_branches || etm->synth_opts.last_branch || in cs_etm__packet_swap()
375 etm->sample_instructions) { in cs_etm__packet_swap()
377 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for in cs_etm__packet_swap()
378 * the next incoming packet. in cs_etm__packet_swap()
380 tmp = tidq->packet; in cs_etm__packet_swap()
381 tidq->packet = tidq->prev_packet; in cs_etm__packet_swap()
382 tidq->prev_packet = tmp; in cs_etm__packet_swap()
391 if (len && (pkt_string[len-1] == '\n')) in cs_etm__packet_dump()
403 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_etmv3()
413 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_etmv4()
431 for (i = 0; i < etm->num_cpu; i++) { in cs_etm__init_trace_params()
432 architecture = etm->metadata[i][CS_ETM_MAGIC]; in cs_etm__init_trace_params()
436 etmidr = etm->metadata[i][CS_ETM_ETMIDR]; in cs_etm__init_trace_params()
443 return -EINVAL; in cs_etm__init_trace_params()
454 int ret = -EINVAL; in cs_etm__init_decoder_params()
459 d_params->packet_printer = cs_etm__packet_dump; in cs_etm__init_decoder_params()
460 d_params->operation = mode; in cs_etm__init_decoder_params()
461 d_params->data = etmq; in cs_etm__init_decoder_params()
462 d_params->formatted = true; in cs_etm__init_decoder_params()
463 d_params->fsyncs = false; in cs_etm__init_decoder_params()
464 d_params->hsyncs = false; in cs_etm__init_decoder_params()
465 d_params->frame_aligned = true; in cs_etm__init_decoder_params()
485 buffer->size); in cs_etm__dump_event()
488 t_params = zalloc(sizeof(*t_params) * etm->num_cpu); in cs_etm__dump_event()
501 decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params); in cs_etm__dump_event()
509 decoder, buffer->offset, in cs_etm__dump_event()
510 &((u8 *)buffer->data)[buffer_used], in cs_etm__dump_event()
511 buffer->size - buffer_used, &consumed); in cs_etm__dump_event()
516 } while (buffer_used < buffer->size); in cs_etm__dump_event()
528 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__flush_events()
534 if (!tool->ordered_events) in cs_etm__flush_events()
535 return -EINVAL; in cs_etm__flush_events()
542 if (etm->timeless_decoding) in cs_etm__flush_events()
543 return cs_etm__process_timeless_queues(etm, -1); in cs_etm__flush_events()
554 struct intlist *traceid_queues_list = etmq->traceid_queues_list; in cs_etm__free_traceid_queues()
557 priv = (uintptr_t)inode->priv; in cs_etm__free_traceid_queues()
561 tidq = etmq->traceid_queues[idx]; in cs_etm__free_traceid_queues()
562 thread__zput(tidq->thread); in cs_etm__free_traceid_queues()
563 zfree(&tidq->event_buf); in cs_etm__free_traceid_queues()
564 zfree(&tidq->last_branch); in cs_etm__free_traceid_queues()
565 zfree(&tidq->last_branch_rb); in cs_etm__free_traceid_queues()
566 zfree(&tidq->prev_packet); in cs_etm__free_traceid_queues()
567 zfree(&tidq->packet); in cs_etm__free_traceid_queues()
579 etmq->traceid_queues_list = NULL; in cs_etm__free_traceid_queues()
582 zfree(&etmq->traceid_queues); in cs_etm__free_traceid_queues()
592 cs_etm_decoder__free(etmq->decoder); in cs_etm__free_queue()
600 struct cs_etm_auxtrace *aux = container_of(session->auxtrace, in cs_etm__free_events()
603 struct auxtrace_queues *queues = &aux->queues; in cs_etm__free_events()
605 for (i = 0; i < queues->nr_queues; i++) { in cs_etm__free_events()
606 cs_etm__free_queue(queues->queue_array[i].priv); in cs_etm__free_events()
607 queues->queue_array[i].priv = NULL; in cs_etm__free_events()
617 struct cs_etm_auxtrace *aux = container_of(session->auxtrace, in cs_etm__free()
621 session->auxtrace = NULL; in cs_etm__free()
629 for (i = 0; i < aux->num_cpu; i++) in cs_etm__free()
630 zfree(&aux->metadata[i]); in cs_etm__free()
632 thread__zput(aux->unknown_thread); in cs_etm__free()
633 zfree(&aux->metadata); in cs_etm__free()
640 struct cs_etm_auxtrace *aux = container_of(session->auxtrace, in cs_etm__evsel_is_auxtrace()
644 return evsel->core.attr.type == aux->pmu_type; in cs_etm__evsel_is_auxtrace()
651 machine = etmq->etm->machine; in cs_etm__cpu_mode()
653 if (address >= etmq->etm->kernel_start) { in cs_etm__cpu_mode()
682 machine = etmq->etm->machine; in cs_etm__mem_access()
688 thread = tidq->thread; in cs_etm__mem_access()
692 thread = etmq->etm->unknown_thread; in cs_etm__mem_access()
695 if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso) in cs_etm__mem_access()
698 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && in cs_etm__mem_access()
699 dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE)) in cs_etm__mem_access()
702 offset = al.map->map_ip(al.map, address); in cs_etm__mem_access()
706 len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size); in cs_etm__mem_access()
724 etmq->traceid_queues_list = intlist__new(NULL); in cs_etm__alloc_queue()
725 if (!etmq->traceid_queues_list) in cs_etm__alloc_queue()
729 t_params = zalloc(sizeof(*t_params) * etm->num_cpu); in cs_etm__alloc_queue()
742 etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params); in cs_etm__alloc_queue()
744 if (!etmq->decoder) in cs_etm__alloc_queue()
751 if (cs_etm_decoder__add_mem_access_cb(etmq->decoder, in cs_etm__alloc_queue()
752 0x0L, ((u64) -1L), in cs_etm__alloc_queue()
760 cs_etm_decoder__free(etmq->decoder); in cs_etm__alloc_queue()
762 intlist__delete(etmq->traceid_queues_list); in cs_etm__alloc_queue()
776 struct cs_etm_queue *etmq = queue->priv; in cs_etm__setup_queue()
778 if (list_empty(&queue->head) || etmq) in cs_etm__setup_queue()
784 ret = -ENOMEM; in cs_etm__setup_queue()
788 queue->priv = etmq; in cs_etm__setup_queue()
789 etmq->etm = etm; in cs_etm__setup_queue()
790 etmq->queue_nr = queue_nr; in cs_etm__setup_queue()
791 etmq->offset = 0; in cs_etm__setup_queue()
793 if (etm->timeless_decoding) in cs_etm__setup_queue()
797 * We are under a CPU-wide trace scenario. As such we need to know in cs_etm__setup_queue()
815 * encountering a timestamp, a full packet queue or the end of in cs_etm__setup_queue()
833 * We didn't find a timestamp so empty all the traceid packet in cs_etm__setup_queue()
834 * queues before looking for another timestamp packet, either in cs_etm__setup_queue()
849 * Note that packets decoded above are still in the traceID's packet in cs_etm__setup_queue()
853 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp); in cs_etm__setup_queue()
863 if (!etm->kernel_start) in cs_etm__setup_queues()
864 etm->kernel_start = machine__kernel_start(etm->machine); in cs_etm__setup_queues()
866 for (i = 0; i < etm->queues.nr_queues; i++) { in cs_etm__setup_queues()
867 ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i); in cs_etm__setup_queues()
877 if (etm->queues.new_data) { in cs_etm__update_queues()
878 etm->queues.new_data = false; in cs_etm__update_queues()
889 struct branch_stack *bs_src = tidq->last_branch_rb; in cs_etm__copy_last_branch_rb()
890 struct branch_stack *bs_dst = tidq->last_branch; in cs_etm__copy_last_branch_rb()
894 * Set the number of records before early exit: ->nr is used to in cs_etm__copy_last_branch_rb()
895 * determine how many branches to copy from ->entries. in cs_etm__copy_last_branch_rb()
897 bs_dst->nr = bs_src->nr; in cs_etm__copy_last_branch_rb()
902 if (!bs_src->nr) in cs_etm__copy_last_branch_rb()
906 * As bs_src->entries is a circular buffer, we need to copy from it in in cs_etm__copy_last_branch_rb()
908 * branch ->last_branch_pos until the end of bs_src->entries buffer. in cs_etm__copy_last_branch_rb()
910 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos; in cs_etm__copy_last_branch_rb()
911 memcpy(&bs_dst->entries[0], in cs_etm__copy_last_branch_rb()
912 &bs_src->entries[tidq->last_branch_pos], in cs_etm__copy_last_branch_rb()
917 * of the bs_src->entries buffer and until the ->last_branch_pos element in cs_etm__copy_last_branch_rb()
922 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) { in cs_etm__copy_last_branch_rb()
923 memcpy(&bs_dst->entries[nr], in cs_etm__copy_last_branch_rb()
924 &bs_src->entries[0], in cs_etm__copy_last_branch_rb()
925 sizeof(struct branch_entry) * tidq->last_branch_pos); in cs_etm__copy_last_branch_rb()
932 tidq->last_branch_pos = 0; in cs_etm__reset_last_branch_rb()
933 tidq->last_branch_rb->nr = 0; in cs_etm__reset_last_branch_rb()
945 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111 in cs_etm__t32_instr_size()
946 * denote a 32-bit instruction. in cs_etm__t32_instr_size()
951 static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet) in cs_etm__first_executed_instr() argument
953 /* Returns 0 for the CS_ETM_DISCONTINUITY packet */ in cs_etm__first_executed_instr()
954 if (packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__first_executed_instr()
957 return packet->start_addr; in cs_etm__first_executed_instr()
961 u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet) in cs_etm__last_executed_instr() argument
963 /* Returns 0 for the CS_ETM_DISCONTINUITY packet */ in cs_etm__last_executed_instr()
964 if (packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__last_executed_instr()
967 return packet->end_addr - packet->last_instr_size; in cs_etm__last_executed_instr()
972 const struct cs_etm_packet *packet, in cs_etm__instr_addr() argument
975 if (packet->isa == CS_ETM_ISA_T32) { in cs_etm__instr_addr()
976 u64 addr = packet->start_addr; in cs_etm__instr_addr()
981 offset--; in cs_etm__instr_addr()
987 return packet->start_addr + offset * 4; in cs_etm__instr_addr()
993 struct branch_stack *bs = tidq->last_branch_rb; in cs_etm__update_last_branch_rb()
1002 if (!tidq->last_branch_pos) in cs_etm__update_last_branch_rb()
1003 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz; in cs_etm__update_last_branch_rb()
1005 tidq->last_branch_pos -= 1; in cs_etm__update_last_branch_rb()
1007 be = &bs->entries[tidq->last_branch_pos]; in cs_etm__update_last_branch_rb()
1008 be->from = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__update_last_branch_rb()
1009 be->to = cs_etm__first_executed_instr(tidq->packet); in cs_etm__update_last_branch_rb()
1011 be->flags.mispred = 0; in cs_etm__update_last_branch_rb()
1012 be->flags.predicted = 1; in cs_etm__update_last_branch_rb()
1015 * Increment bs->nr until reaching the number of last branches asked by in cs_etm__update_last_branch_rb()
1018 if (bs->nr < etmq->etm->synth_opts.last_branch_sz) in cs_etm__update_last_branch_rb()
1019 bs->nr += 1; in cs_etm__update_last_branch_rb()
1025 event->header.size = perf_event__sample_event_size(sample, type, 0); in cs_etm__inject_event()
1033 struct auxtrace_buffer *aux_buffer = etmq->buffer; in cs_etm__get_trace()
1037 queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; in cs_etm__get_trace()
1045 etmq->buf_len = 0; in cs_etm__get_trace()
1049 etmq->buffer = aux_buffer; in cs_etm__get_trace()
1052 if (!aux_buffer->data) { in cs_etm__get_trace()
1054 int fd = perf_data__fd(etmq->etm->session->data); in cs_etm__get_trace()
1056 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd); in cs_etm__get_trace()
1057 if (!aux_buffer->data) in cs_etm__get_trace()
1058 return -ENOMEM; in cs_etm__get_trace()
1065 etmq->buf_used = 0; in cs_etm__get_trace()
1066 etmq->buf_len = aux_buffer->size; in cs_etm__get_trace()
1067 etmq->buf = aux_buffer->data; in cs_etm__get_trace()
1069 return etmq->buf_len; in cs_etm__get_trace()
1075 if ((!tidq->thread) && (tidq->tid != -1)) in cs_etm__set_pid_tid_cpu()
1076 tidq->thread = machine__find_thread(etm->machine, -1, in cs_etm__set_pid_tid_cpu()
1077 tidq->tid); in cs_etm__set_pid_tid_cpu()
1079 if (tidq->thread) in cs_etm__set_pid_tid_cpu()
1080 tidq->pid = tidq->thread->pid_; in cs_etm__set_pid_tid_cpu()
1086 int cpu, err = -EINVAL; in cs_etm__etmq_set_tid()
1087 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__etmq_set_tid()
1097 err = machine__set_current_tid(etm->machine, cpu, tid, tid); in cs_etm__etmq_set_tid()
1101 tidq->tid = tid; in cs_etm__etmq_set_tid()
1102 thread__zput(tidq->thread); in cs_etm__etmq_set_tid()
1110 return !!etmq->etm->timeless_decoding; in cs_etm__etmq_is_timeless()
1115 const struct cs_etm_packet *packet, in cs_etm__copy_insn() argument
1120 * packet, so directly bail out with 'insn_len' = 0. in cs_etm__copy_insn()
1122 if (packet->sample_type == CS_ETM_DISCONTINUITY) { in cs_etm__copy_insn()
1123 sample->insn_len = 0; in cs_etm__copy_insn()
1128 * T32 instruction size might be 32-bit or 16-bit, decide by calling in cs_etm__copy_insn()
1131 if (packet->isa == CS_ETM_ISA_T32) in cs_etm__copy_insn()
1132 sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id, in cs_etm__copy_insn()
1133 sample->ip); in cs_etm__copy_insn()
1134 /* Otherwise, A64 and A32 instruction size are always 32-bit. */ in cs_etm__copy_insn()
1136 sample->insn_len = 4; in cs_etm__copy_insn()
1138 cs_etm__mem_access(etmq, trace_chan_id, sample->ip, in cs_etm__copy_insn()
1139 sample->insn_len, (void *)sample->insn); in cs_etm__copy_insn()
1147 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__synth_instruction_sample()
1148 union perf_event *event = tidq->event_buf; in cs_etm__synth_instruction_sample()
1151 event->sample.header.type = PERF_RECORD_SAMPLE; in cs_etm__synth_instruction_sample()
1152 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr); in cs_etm__synth_instruction_sample()
1153 event->sample.header.size = sizeof(struct perf_event_header); in cs_etm__synth_instruction_sample()
1156 sample.pid = tidq->pid; in cs_etm__synth_instruction_sample()
1157 sample.tid = tidq->tid; in cs_etm__synth_instruction_sample()
1158 sample.id = etmq->etm->instructions_id; in cs_etm__synth_instruction_sample()
1159 sample.stream_id = etmq->etm->instructions_id; in cs_etm__synth_instruction_sample()
1161 sample.cpu = tidq->packet->cpu; in cs_etm__synth_instruction_sample()
1162 sample.flags = tidq->prev_packet->flags; in cs_etm__synth_instruction_sample()
1163 sample.cpumode = event->sample.header.misc; in cs_etm__synth_instruction_sample()
1165 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample); in cs_etm__synth_instruction_sample()
1167 if (etm->synth_opts.last_branch) in cs_etm__synth_instruction_sample()
1168 sample.branch_stack = tidq->last_branch; in cs_etm__synth_instruction_sample()
1170 if (etm->synth_opts.inject) { in cs_etm__synth_instruction_sample()
1172 etm->instructions_sample_type); in cs_etm__synth_instruction_sample()
1177 ret = perf_session__deliver_synth_event(etm->session, event, &sample); in cs_etm__synth_instruction_sample()
1188 * The cs etm packet encodes an instruction range between a branch target
1195 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__synth_branch_sample()
1197 union perf_event *event = tidq->event_buf; in cs_etm__synth_branch_sample()
1205 ip = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__synth_branch_sample()
1207 event->sample.header.type = PERF_RECORD_SAMPLE; in cs_etm__synth_branch_sample()
1208 event->sample.header.misc = cs_etm__cpu_mode(etmq, ip); in cs_etm__synth_branch_sample()
1209 event->sample.header.size = sizeof(struct perf_event_header); in cs_etm__synth_branch_sample()
1212 sample.pid = tidq->pid; in cs_etm__synth_branch_sample()
1213 sample.tid = tidq->tid; in cs_etm__synth_branch_sample()
1214 sample.addr = cs_etm__first_executed_instr(tidq->packet); in cs_etm__synth_branch_sample()
1215 sample.id = etmq->etm->branches_id; in cs_etm__synth_branch_sample()
1216 sample.stream_id = etmq->etm->branches_id; in cs_etm__synth_branch_sample()
1218 sample.cpu = tidq->packet->cpu; in cs_etm__synth_branch_sample()
1219 sample.flags = tidq->prev_packet->flags; in cs_etm__synth_branch_sample()
1220 sample.cpumode = event->sample.header.misc; in cs_etm__synth_branch_sample()
1222 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet, in cs_etm__synth_branch_sample()
1228 if (etm->synth_opts.last_branch) { in cs_etm__synth_branch_sample()
1231 .hw_idx = -1ULL, in cs_etm__synth_branch_sample()
1240 if (etm->synth_opts.inject) { in cs_etm__synth_branch_sample()
1242 etm->branches_sample_type); in cs_etm__synth_branch_sample()
1247 ret = perf_session__deliver_synth_event(etm->session, event, &sample); in cs_etm__synth_branch_sample()
1270 return perf_session__deliver_synth_event(cs_etm_synth->session, in cs_etm__event_synth()
1289 struct evlist *evlist = session->evlist; in cs_etm__synth_events()
1297 if (evsel->core.attr.type == etm->pmu_type) { in cs_etm__synth_events()
1311 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; in cs_etm__synth_events()
1314 if (etm->timeless_decoding) in cs_etm__synth_events()
1319 attr.exclude_user = evsel->core.attr.exclude_user; in cs_etm__synth_events()
1320 attr.exclude_kernel = evsel->core.attr.exclude_kernel; in cs_etm__synth_events()
1321 attr.exclude_hv = evsel->core.attr.exclude_hv; in cs_etm__synth_events()
1322 attr.exclude_host = evsel->core.attr.exclude_host; in cs_etm__synth_events()
1323 attr.exclude_guest = evsel->core.attr.exclude_guest; in cs_etm__synth_events()
1324 attr.sample_id_all = evsel->core.attr.sample_id_all; in cs_etm__synth_events()
1325 attr.read_format = evsel->core.attr.read_format; in cs_etm__synth_events()
1328 id = evsel->core.id[0] + 1000000000; in cs_etm__synth_events()
1333 if (etm->synth_opts.branches) { in cs_etm__synth_events()
1340 etm->sample_branches = true; in cs_etm__synth_events()
1341 etm->branches_sample_type = attr.sample_type; in cs_etm__synth_events()
1342 etm->branches_id = id; in cs_etm__synth_events()
1347 if (etm->synth_opts.last_branch) { in cs_etm__synth_events()
1357 if (etm->synth_opts.instructions) { in cs_etm__synth_events()
1359 attr.sample_period = etm->synth_opts.period; in cs_etm__synth_events()
1360 etm->instructions_sample_period = attr.sample_period; in cs_etm__synth_events()
1364 etm->sample_instructions = true; in cs_etm__synth_events()
1365 etm->instructions_sample_type = attr.sample_type; in cs_etm__synth_events()
1366 etm->instructions_id = id; in cs_etm__synth_events()
1376 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__sample()
1378 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__sample()
1381 /* Get instructions remainder from previous packet */ in cs_etm__sample()
1382 instrs_prev = tidq->period_instructions; in cs_etm__sample()
1384 tidq->period_instructions += tidq->packet->instr_count; in cs_etm__sample()
1390 if (etm->synth_opts.last_branch && in cs_etm__sample()
1391 tidq->prev_packet->sample_type == CS_ETM_RANGE && in cs_etm__sample()
1392 tidq->prev_packet->last_instr_taken_branch) in cs_etm__sample()
1395 if (etm->sample_instructions && in cs_etm__sample()
1396 tidq->period_instructions >= etm->instructions_sample_period) { in cs_etm__sample()
1410 * -------------------------------------------------- in cs_etm__sample()
1417 * \---------------- -----------------/ in cs_etm__sample()
1419 * tidq->packet->instr_count in cs_etm__sample()
1422 * every etm->instructions_sample_period instructions - as in cs_etm__sample()
1424 * last sample before the current etm packet, n+1 to n+3 in cs_etm__sample()
1425 * samples are generated from the current etm packet. in cs_etm__sample()
1427 * tidq->packet->instr_count represents the number of in cs_etm__sample()
1428 * instructions in the current etm packet. in cs_etm__sample()
1432 * previous etm packet. This will always be less than in cs_etm__sample()
1433 * etm->instructions_sample_period. in cs_etm__sample()
1436 * instructions, one is the tail of the old packet and another in cs_etm__sample()
1437 * is the head of the new coming packet, to generate in cs_etm__sample()
1440 * instructions will be used by later packet and it is assigned in cs_etm__sample()
1441 * to tidq->period_instructions for next round calculation. in cs_etm__sample()
1445 * Get the initial offset into the current packet instructions; in cs_etm__sample()
1447 * etm->instructions_sample_period. in cs_etm__sample()
1449 u64 offset = etm->instructions_sample_period - instrs_prev; in cs_etm__sample()
1453 if (etm->synth_opts.last_branch) in cs_etm__sample()
1456 while (tidq->period_instructions >= in cs_etm__sample()
1457 etm->instructions_sample_period) { in cs_etm__sample()
1459 * Calculate the address of the sampled instruction (-1 in cs_etm__sample()
1465 tidq->packet, offset - 1); in cs_etm__sample()
1468 etm->instructions_sample_period); in cs_etm__sample()
1472 offset += etm->instructions_sample_period; in cs_etm__sample()
1473 tidq->period_instructions -= in cs_etm__sample()
1474 etm->instructions_sample_period; in cs_etm__sample()
1478 if (etm->sample_branches) { in cs_etm__sample()
1481 /* Generate sample for tracing on packet */ in cs_etm__sample()
1482 if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__sample()
1485 /* Generate sample for branch taken packet */ in cs_etm__sample()
1486 if (tidq->prev_packet->sample_type == CS_ETM_RANGE && in cs_etm__sample()
1487 tidq->prev_packet->last_instr_taken_branch) in cs_etm__sample()
1505 * When the exception packet is inserted, whether the last instruction in cs_etm__exception()
1506 * in previous range packet is taken branch or not, we need to force in cs_etm__exception()
1507 * to set 'prev_packet->last_instr_taken_branch' to true. This ensures in cs_etm__exception()
1511 * The exception packet includes the dummy address values, so don't in cs_etm__exception()
1512 * swap PACKET with PREV_PACKET. This keeps PREV_PACKET to be useful in cs_etm__exception()
1515 if (tidq->prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__exception()
1516 tidq->prev_packet->last_instr_taken_branch = true; in cs_etm__exception()
1525 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__flush()
1527 /* Handle start tracing packet */ in cs_etm__flush()
1528 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY) in cs_etm__flush()
1531 if (etmq->etm->synth_opts.last_branch && in cs_etm__flush()
1532 tidq->prev_packet->sample_type == CS_ETM_RANGE) { in cs_etm__flush()
1545 addr = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__flush()
1549 tidq->period_instructions); in cs_etm__flush()
1553 tidq->period_instructions = 0; in cs_etm__flush()
1557 if (etm->sample_branches && in cs_etm__flush()
1558 tidq->prev_packet->sample_type == CS_ETM_RANGE) { in cs_etm__flush()
1568 if (etm->synth_opts.last_branch) in cs_etm__flush()
1580 * It has no new packet coming and 'etmq->packet' contains the stale in cs_etm__end_block()
1581 * packet which was set at the previous time with packets swapping; in cs_etm__end_block()
1582 * so skip to generate branch sample to avoid stale packet. in cs_etm__end_block()
1588 if (etmq->etm->synth_opts.last_branch && in cs_etm__end_block()
1589 tidq->prev_packet->sample_type == CS_ETM_RANGE) { in cs_etm__end_block()
1599 addr = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__end_block()
1603 tidq->period_instructions); in cs_etm__end_block()
1607 tidq->period_instructions = 0; in cs_etm__end_block()
1623 if (!etmq->buf_len) { in cs_etm__get_data_block()
1629 * are contiguous, reset the decoder to force re-sync. in cs_etm__get_data_block()
1631 ret = cs_etm_decoder__reset(etmq->decoder); in cs_etm__get_data_block()
1636 return etmq->buf_len; in cs_etm__get_data_block()
1640 struct cs_etm_packet *packet, in cs_etm__is_svc_instr() argument
1648 switch (packet->isa) { in cs_etm__is_svc_instr()
1654 * +-----------------+--------+ in cs_etm__is_svc_instr()
1656 * +-----------------+--------+ in cs_etm__is_svc_instr()
1662 addr = end_addr - 2; in cs_etm__is_svc_instr()
1674 * +---------+---------+-------------------------+ in cs_etm__is_svc_instr()
1676 * +---------+---------+-------------------------+ in cs_etm__is_svc_instr()
1678 addr = end_addr - 4; in cs_etm__is_svc_instr()
1691 * +-----------------------+---------+-----------+ in cs_etm__is_svc_instr()
1693 * +-----------------------+---------+-----------+ in cs_etm__is_svc_instr()
1695 addr = end_addr - 4; in cs_etm__is_svc_instr()
1711 struct cs_etm_traceid_queue *tidq, u64 magic) in cs_etm__is_syscall() argument
1713 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__is_syscall()
1714 struct cs_etm_packet *packet = tidq->packet; in cs_etm__is_syscall() local
1715 struct cs_etm_packet *prev_packet = tidq->prev_packet; in cs_etm__is_syscall()
1717 if (magic == __perf_cs_etmv3_magic) in cs_etm__is_syscall()
1718 if (packet->exception_number == CS_ETMV3_EXC_SVC) in cs_etm__is_syscall()
1724 * packet address. in cs_etm__is_syscall()
1726 if (magic == __perf_cs_etmv4_magic) { in cs_etm__is_syscall()
1727 if (packet->exception_number == CS_ETMV4_EXC_CALL && in cs_etm__is_syscall()
1729 prev_packet->end_addr)) in cs_etm__is_syscall()
1737 u64 magic) in cs_etm__is_async_exception() argument
1739 struct cs_etm_packet *packet = tidq->packet; in cs_etm__is_async_exception() local
1741 if (magic == __perf_cs_etmv3_magic) in cs_etm__is_async_exception()
1742 if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT || in cs_etm__is_async_exception()
1743 packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT || in cs_etm__is_async_exception()
1744 packet->exception_number == CS_ETMV3_EXC_PE_RESET || in cs_etm__is_async_exception()
1745 packet->exception_number == CS_ETMV3_EXC_IRQ || in cs_etm__is_async_exception()
1746 packet->exception_number == CS_ETMV3_EXC_FIQ) in cs_etm__is_async_exception()
1749 if (magic == __perf_cs_etmv4_magic) in cs_etm__is_async_exception()
1750 if (packet->exception_number == CS_ETMV4_EXC_RESET || in cs_etm__is_async_exception()
1751 packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT || in cs_etm__is_async_exception()
1752 packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR || in cs_etm__is_async_exception()
1753 packet->exception_number == CS_ETMV4_EXC_INST_DEBUG || in cs_etm__is_async_exception()
1754 packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG || in cs_etm__is_async_exception()
1755 packet->exception_number == CS_ETMV4_EXC_IRQ || in cs_etm__is_async_exception()
1756 packet->exception_number == CS_ETMV4_EXC_FIQ) in cs_etm__is_async_exception()
1764 u64 magic) in cs_etm__is_sync_exception() argument
1766 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__is_sync_exception()
1767 struct cs_etm_packet *packet = tidq->packet; in cs_etm__is_sync_exception() local
1768 struct cs_etm_packet *prev_packet = tidq->prev_packet; in cs_etm__is_sync_exception()
1770 if (magic == __perf_cs_etmv3_magic) in cs_etm__is_sync_exception()
1771 if (packet->exception_number == CS_ETMV3_EXC_SMC || in cs_etm__is_sync_exception()
1772 packet->exception_number == CS_ETMV3_EXC_HYP || in cs_etm__is_sync_exception()
1773 packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE || in cs_etm__is_sync_exception()
1774 packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR || in cs_etm__is_sync_exception()
1775 packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT || in cs_etm__is_sync_exception()
1776 packet->exception_number == CS_ETMV3_EXC_DATA_FAULT || in cs_etm__is_sync_exception()
1777 packet->exception_number == CS_ETMV3_EXC_GENERIC) in cs_etm__is_sync_exception()
1780 if (magic == __perf_cs_etmv4_magic) { in cs_etm__is_sync_exception()
1781 if (packet->exception_number == CS_ETMV4_EXC_TRAP || in cs_etm__is_sync_exception()
1782 packet->exception_number == CS_ETMV4_EXC_ALIGNMENT || in cs_etm__is_sync_exception()
1783 packet->exception_number == CS_ETMV4_EXC_INST_FAULT || in cs_etm__is_sync_exception()
1784 packet->exception_number == CS_ETMV4_EXC_DATA_FAULT) in cs_etm__is_sync_exception()
1791 if (packet->exception_number == CS_ETMV4_EXC_CALL && in cs_etm__is_sync_exception()
1793 prev_packet->end_addr)) in cs_etm__is_sync_exception()
1803 if (packet->exception_number > CS_ETMV4_EXC_FIQ && in cs_etm__is_sync_exception()
1804 packet->exception_number <= CS_ETMV4_EXC_END) in cs_etm__is_sync_exception()
1814 struct cs_etm_packet *packet = tidq->packet; in cs_etm__set_sample_flags() local
1815 struct cs_etm_packet *prev_packet = tidq->prev_packet; in cs_etm__set_sample_flags()
1816 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__set_sample_flags()
1817 u64 magic; in cs_etm__set_sample_flags() local
1820 switch (packet->sample_type) { in cs_etm__set_sample_flags()
1827 if (packet->last_instr_type == OCSD_INSTR_BR && in cs_etm__set_sample_flags()
1828 packet->last_instr_subtype == OCSD_S_INSTR_NONE) { in cs_etm__set_sample_flags()
1829 packet->flags = PERF_IP_FLAG_BRANCH; in cs_etm__set_sample_flags()
1831 if (packet->last_instr_cond) in cs_etm__set_sample_flags()
1832 packet->flags |= PERF_IP_FLAG_CONDITIONAL; in cs_etm__set_sample_flags()
1839 if (packet->last_instr_type == OCSD_INSTR_BR && in cs_etm__set_sample_flags()
1840 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK) in cs_etm__set_sample_flags()
1841 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1848 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
1849 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK) in cs_etm__set_sample_flags()
1850 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1858 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
1859 packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET) in cs_etm__set_sample_flags()
1860 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1868 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
1869 packet->last_instr_subtype == OCSD_S_INSTR_NONE) in cs_etm__set_sample_flags()
1870 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1874 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
1875 packet->last_instr_subtype == OCSD_S_INSTR_V8_RET) in cs_etm__set_sample_flags()
1876 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1884 if (prev_packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__set_sample_flags()
1885 prev_packet->flags |= PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1889 * If the previous packet is an exception return packet in cs_etm__set_sample_flags()
1891 * it needs to calibrate the previous packet sample flags in cs_etm__set_sample_flags()
1894 if (prev_packet->flags == (PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1898 packet, packet->start_addr)) in cs_etm__set_sample_flags()
1899 prev_packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1905 * The trace is discontinuous, if the previous packet is an in cs_etm__set_sample_flags()
1906 * instruction packet, set flag PERF_IP_FLAG_TRACE_END in cs_etm__set_sample_flags()
1907 * for previous packet. in cs_etm__set_sample_flags()
1909 if (prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__set_sample_flags()
1910 prev_packet->flags |= PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1914 ret = cs_etm__get_magic(packet->trace_chan_id, &magic); in cs_etm__set_sample_flags()
1919 if (cs_etm__is_syscall(etmq, tidq, magic)) in cs_etm__set_sample_flags()
1920 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1927 else if (cs_etm__is_async_exception(tidq, magic)) in cs_etm__set_sample_flags()
1928 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1936 else if (cs_etm__is_sync_exception(etmq, tidq, magic)) in cs_etm__set_sample_flags()
1937 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1942 * When the exception packet is inserted, since exception in cs_etm__set_sample_flags()
1943 * packet is not used standalone for generating samples in cs_etm__set_sample_flags()
1945 * packet; so set previous range packet flags to tell perf in cs_etm__set_sample_flags()
1948 if (prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__set_sample_flags()
1949 prev_packet->flags = packet->flags; in cs_etm__set_sample_flags()
1953 * When the exception return packet is inserted, since in cs_etm__set_sample_flags()
1954 * exception return packet is not used standalone for in cs_etm__set_sample_flags()
1956 * instruction range packet; so set previous range packet in cs_etm__set_sample_flags()
1960 * other exception types; unfortunately the packet doesn't in cs_etm__set_sample_flags()
1962 * the exception type purely based on exception return packet. in cs_etm__set_sample_flags()
1963 * If we record the exception number from exception packet and in cs_etm__set_sample_flags()
1964 * reuse it for exception return packet, this is not reliable in cs_etm__set_sample_flags()
1967 * used for exception return packet for these two cases. in cs_etm__set_sample_flags()
1969 * For exception return packet, we only need to distinguish the in cs_etm__set_sample_flags()
1970 * packet is for system call or for other types. Thus the in cs_etm__set_sample_flags()
1971 * decision can be deferred when receive the next packet which in cs_etm__set_sample_flags()
1977 if (prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__set_sample_flags()
1978 prev_packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
1996 * Packets are decoded and added to the decoder's packet queue in cs_etm__decode_data_block()
1997 * until the decoder packet processing callback has requested that in cs_etm__decode_data_block()
1999 * operations that stop processing are a timestamp packet or a full in cs_etm__decode_data_block()
2002 ret = cs_etm_decoder__process_data_block(etmq->decoder, in cs_etm__decode_data_block()
2003 etmq->offset, in cs_etm__decode_data_block()
2004 &etmq->buf[etmq->buf_used], in cs_etm__decode_data_block()
2005 etmq->buf_len, in cs_etm__decode_data_block()
2010 etmq->offset += processed; in cs_etm__decode_data_block()
2011 etmq->buf_used += processed; in cs_etm__decode_data_block()
2012 etmq->buf_len -= processed; in cs_etm__decode_data_block()
2024 packet_queue = &tidq->packet_queue; in cs_etm__process_traceid_queue()
2026 /* Process each packet in this chunk */ in cs_etm__process_traceid_queue()
2029 tidq->packet); in cs_etm__process_traceid_queue()
2038 * Since packet addresses are swapped in packet in cs_etm__process_traceid_queue()
2048 switch (tidq->packet->sample_type) { in cs_etm__process_traceid_queue()
2051 * If the packet contains an instruction in cs_etm__process_traceid_queue()
2060 * If the exception packet is coming, in cs_etm__process_traceid_queue()
2062 * range packet to be handled properly. in cs_etm__process_traceid_queue()
2075 * Should not receive empty packet, in cs_etm__process_traceid_queue()
2078 pr_err("CS ETM Trace: empty packet\n"); in cs_etm__process_traceid_queue()
2079 return -EINVAL; in cs_etm__process_traceid_queue()
2093 struct intlist *traceid_queues_list = etmq->traceid_queues_list; in cs_etm__clear_all_traceid_queues()
2096 idx = (int)(intptr_t)inode->priv; in cs_etm__clear_all_traceid_queues()
2097 tidq = etmq->traceid_queues[idx]; in cs_etm__clear_all_traceid_queues()
2117 return -EINVAL; in cs_etm__run_decoder()
2132 * Process each packet in this chunk, nothing to do if in cs_etm__run_decoder()
2138 } while (etmq->buf_len); in cs_etm__run_decoder()
2152 struct auxtrace_queues *queues = &etm->queues; in cs_etm__process_timeless_queues()
2154 for (i = 0; i < queues->nr_queues; i++) { in cs_etm__process_timeless_queues()
2155 struct auxtrace_queue *queue = &etm->queues.queue_array[i]; in cs_etm__process_timeless_queues()
2156 struct cs_etm_queue *etmq = queue->priv; in cs_etm__process_timeless_queues()
2168 if ((tid == -1) || (tidq->tid == tid)) { in cs_etm__process_timeless_queues()
2188 if (!etm->heap.heap_cnt) in cs_etm__process_queues()
2192 cs_queue_nr = etm->heap.heap_array[0].queue_nr; in cs_etm__process_queues()
2195 queue = &etm->queues.queue_array[queue_nr]; in cs_etm__process_queues()
2196 etmq = queue->priv; in cs_etm__process_queues()
2202 auxtrace_heap__pop(&etm->heap); in cs_etm__process_queues()
2211 ret = -EINVAL; in cs_etm__process_queues()
2269 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp); in cs_etm__process_queues()
2281 if (etm->timeless_decoding) in cs_etm__process_itrace_start()
2288 th = machine__findnew_thread(etm->machine, in cs_etm__process_itrace_start()
2289 event->itrace_start.pid, in cs_etm__process_itrace_start()
2290 event->itrace_start.tid); in cs_etm__process_itrace_start()
2292 return -ENOMEM; in cs_etm__process_itrace_start()
2303 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; in cs_etm__process_switch_cpu_wide()
2306 * Context switch in per-thread mode are irrelevant since perf in cs_etm__process_switch_cpu_wide()
2309 if (etm->timeless_decoding) in cs_etm__process_switch_cpu_wide()
2324 th = machine__findnew_thread(etm->machine, in cs_etm__process_switch_cpu_wide()
2325 event->context_switch.next_prev_pid, in cs_etm__process_switch_cpu_wide()
2326 event->context_switch.next_prev_tid); in cs_etm__process_switch_cpu_wide()
2328 return -ENOMEM; in cs_etm__process_switch_cpu_wide()
2342 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__process_event()
2349 if (!tool->ordered_events) { in cs_etm__process_event()
2351 return -EINVAL; in cs_etm__process_event()
2354 if (sample->time && (sample->time != (u64) -1)) in cs_etm__process_event()
2355 timestamp = sample->time; in cs_etm__process_event()
2359 if (timestamp || etm->timeless_decoding) { in cs_etm__process_event()
2365 if (etm->timeless_decoding && in cs_etm__process_event()
2366 event->header.type == PERF_RECORD_EXIT) in cs_etm__process_event()
2368 event->fork.tid); in cs_etm__process_event()
2370 if (event->header.type == PERF_RECORD_ITRACE_START) in cs_etm__process_event()
2372 else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) in cs_etm__process_event()
2375 if (!etm->timeless_decoding && in cs_etm__process_event()
2376 event->header.type == PERF_RECORD_AUX) in cs_etm__process_event()
2386 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__process_auxtrace_event()
2389 if (!etm->data_queued) { in cs_etm__process_auxtrace_event()
2392 int fd = perf_data__fd(session->data); in cs_etm__process_auxtrace_event()
2393 bool is_pipe = perf_data__is_pipe(session->data); in cs_etm__process_auxtrace_event()
2400 if (data_offset == -1) in cs_etm__process_auxtrace_event()
2401 return -errno; in cs_etm__process_auxtrace_event()
2404 err = auxtrace_queues__add_event(&etm->queues, session, in cs_etm__process_auxtrace_event()
2422 struct evlist *evlist = etm->session->evlist; in cs_etm__is_timeless_decoding()
2430 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) in cs_etm__is_timeless_decoding()
2444 [CS_ETM_MAGIC] = " Magic number %llx\n",
2453 [CS_ETM_MAGIC] = " Magic number %llx\n",
2487 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; in cs_etm__process_auxtrace_info()
2493 int total_size = auxtrace_info->header.size; in cs_etm__process_auxtrace_info()
2496 int err = 0, idx = -1; in cs_etm__process_auxtrace_info()
2508 return -EINVAL; in cs_etm__process_auxtrace_info()
2510 priv_size = total_size - event_header_size - info_header_size; in cs_etm__process_auxtrace_info()
2513 ptr = (u64 *) auxtrace_info->priv; in cs_etm__process_auxtrace_info()
2517 return -EINVAL; in cs_etm__process_auxtrace_info()
2521 return -ENOMEM; in cs_etm__process_auxtrace_info()
2523 /* Extract header information - see cs-etm.h for format */ in cs_etm__process_auxtrace_info()
2531 * Create an RB tree for traceID-metadata tuple. Since the conversion in cs_etm__process_auxtrace_info()
2532 * has to be made for each packet that gets decoded, optimizing access in cs_etm__process_auxtrace_info()
2537 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2543 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2558 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2571 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2587 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2595 if (inode->priv) { in cs_etm__process_auxtrace_info()
2596 err = -EINVAL; in cs_etm__process_auxtrace_info()
2600 inode->priv = metadata[j]; in cs_etm__process_auxtrace_info()
2611 err = -EINVAL; in cs_etm__process_auxtrace_info()
2618 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2622 err = auxtrace_queues__init(&etm->queues); in cs_etm__process_auxtrace_info()
2626 etm->session = session; in cs_etm__process_auxtrace_info()
2627 etm->machine = &session->machines.host; in cs_etm__process_auxtrace_info()
2629 etm->num_cpu = num_cpu; in cs_etm__process_auxtrace_info()
2630 etm->pmu_type = pmu_type; in cs_etm__process_auxtrace_info()
2631 etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0); in cs_etm__process_auxtrace_info()
2632 etm->metadata = metadata; in cs_etm__process_auxtrace_info()
2633 etm->auxtrace_type = auxtrace_info->type; in cs_etm__process_auxtrace_info()
2634 etm->timeless_decoding = cs_etm__is_timeless_decoding(etm); in cs_etm__process_auxtrace_info()
2636 etm->auxtrace.process_event = cs_etm__process_event; in cs_etm__process_auxtrace_info()
2637 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event; in cs_etm__process_auxtrace_info()
2638 etm->auxtrace.flush_events = cs_etm__flush_events; in cs_etm__process_auxtrace_info()
2639 etm->auxtrace.free_events = cs_etm__free_events; in cs_etm__process_auxtrace_info()
2640 etm->auxtrace.free = cs_etm__free; in cs_etm__process_auxtrace_info()
2641 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace; in cs_etm__process_auxtrace_info()
2642 session->auxtrace = &etm->auxtrace; in cs_etm__process_auxtrace_info()
2644 etm->unknown_thread = thread__new(999999999, 999999999); in cs_etm__process_auxtrace_info()
2645 if (!etm->unknown_thread) { in cs_etm__process_auxtrace_info()
2646 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2654 INIT_LIST_HEAD(&etm->unknown_thread->node); in cs_etm__process_auxtrace_info()
2656 err = thread__set_comm(etm->unknown_thread, "unknown", 0); in cs_etm__process_auxtrace_info()
2660 if (thread__init_maps(etm->unknown_thread, etm->machine)) { in cs_etm__process_auxtrace_info()
2661 err = -ENOMEM; in cs_etm__process_auxtrace_info()
2666 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu); in cs_etm__process_auxtrace_info()
2670 if (session->itrace_synth_opts->set) { in cs_etm__process_auxtrace_info()
2671 etm->synth_opts = *session->itrace_synth_opts; in cs_etm__process_auxtrace_info()
2673 itrace_synth_opts__set_default(&etm->synth_opts, in cs_etm__process_auxtrace_info()
2674 session->itrace_synth_opts->default_no_sample); in cs_etm__process_auxtrace_info()
2675 etm->synth_opts.callchain = false; in cs_etm__process_auxtrace_info()
2682 err = auxtrace_queues__process_index(&etm->queues, session); in cs_etm__process_auxtrace_info()
2686 etm->data_queued = etm->queues.populated; in cs_etm__process_auxtrace_info()
2691 thread__zput(etm->unknown_thread); in cs_etm__process_auxtrace_info()
2693 auxtrace_queues__free(&etm->queues); in cs_etm__process_auxtrace_info()
2694 session->auxtrace = NULL; in cs_etm__process_auxtrace_info()