Lines matching "10", "base", "-", "te" in arch/x86/events/intel/pt.c (the Intel Processor Trace PMU driver)
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2013-2014, Intel Corporation.
8 * http://software.intel.com/en-us/intel-isa-extensions
23 #include <asm/intel-family.h>
40 * width encoded in IP-related packets), and event configuration (bitmasks with
74 u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; in intel_pt_validate_cap()
75 unsigned int shift = __ffs(cd->mask); in intel_pt_validate_cap()
77 return (c & cd->mask) >> shift; in intel_pt_validate_cap()
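
intel_pt_validate_cap() pulls one capability field out of the cached CPUID leaves: __ffs() gives the index of the mask's lowest set bit, so (c & mask) >> shift isolates the field value. A minimal userspace sketch of the same mask/shift pattern (the CPUID value and mask below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Stand-alone illustration of the (c & mask) >> __ffs(mask) idiom;
 * __builtin_ctz() plays the role of the kernel's __ffs(). */
static unsigned int extract_field(uint32_t reg, uint32_t mask)
{
        unsigned int shift = __builtin_ctz(mask);
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t ebx = 0x0000000e;      /* hypothetical CPUID.(EAX=14H):EBX */

        /* a 3-bit field occupying bits 1..3 */
        printf("field = %u\n", extract_field(ebx, 0x0e));       /* -> 7 */
        return 0;
}
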
93 enum pt_capabilities cap = (long)ea->var; in pt_cap_show()
107 PMU_FORMAT_ATTR(tsc, "config:10");
111 PMU_FORMAT_ATTR(mtc_period, "config:14-17");
112 PMU_FORMAT_ATTR(cyc_thresh, "config:19-22");
113 PMU_FORMAT_ATTR(psb_period, "config:24-27");
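
These format attributes publish where each option lives inside perf_event_attr.config (they surface in sysfs under the intel_pt PMU's format directory). As an illustration, here is a sketch that composes a config word by hand from the bit positions above; the chosen field values are arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t config = 0;

        config |= 1ULL << 10;           /* tsc            -> config:10    */
        config |= 0x3ULL << 14;         /* mtc_period = 3 -> config:14-17 */
        config |= 0x5ULL << 24;         /* psb_period = 5 -> config:24-27 */

        printf("attr.config = %#llx\n", (unsigned long long)config);
        return 0;
}
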
143 switch (pmu_attr->id) { in pt_timing_attr_show()
154 return -EINVAL; in pt_timing_attr_show()
205 /* model-specific quirks */ in pt_pmu_hw_init()
220 * Intel SDM, 36.5 "Tracing post-VMXON" says that in pt_pmu_hw_init()
222 * post-VMXON. in pt_pmu_hw_init()
237 ret = -ENOMEM; in pt_pmu_hw_init()
251 de_attr->attr.attr.name = pt_caps[i].name; in pt_pmu_hw_init()
253 sysfs_attr_init(&de_attr->attr.attr); in pt_pmu_hw_init()
255 de_attr->attr.attr.mode = S_IRUGO; in pt_pmu_hw_init()
256 de_attr->attr.show = pt_cap_show; in pt_pmu_hw_init()
257 de_attr->var = (void *)i; in pt_pmu_hw_init()
259 attrs[i] = &de_attr->attr.attr; in pt_pmu_hw_init()
304 u64 config = event->attr.config; in pt_event_valid()
370 * Re-using bit 0 for this purpose is fine because it is never in pt_event_valid()
372 * the attr.config resulted in -EINVAL. in pt_event_valid()
401 u64 ctl = event->hw.config; in pt_config_start()
404 if (READ_ONCE(pt->vmx_on)) in pt_config_start()
405 perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); in pt_config_start()
409 WRITE_ONCE(event->hw.config, ctl); in pt_config_start()
442 struct pt_filters *filters = event->hw.addr_filters; in pt_config_filters()
452 for (range = 0; range < filters->nr_filters; range++) { in pt_config_filters()
453 struct pt_filter *filter = &filters->filter[range]; in pt_config_filters()
465 if (pt->filters.filter[range].msr_a != filter->msr_a) { in pt_config_filters()
466 wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a); in pt_config_filters()
467 pt->filters.filter[range].msr_a = filter->msr_a; in pt_config_filters()
470 if (pt->filters.filter[range].msr_b != filter->msr_b) { in pt_config_filters()
471 wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b); in pt_config_filters()
472 pt->filters.filter[range].msr_b = filter->msr_b; in pt_config_filters()
475 rtit_ctl |= filter->config << pt_address_ranges[range].reg_off; in pt_config_filters()
484 struct pt_buffer *buf = perf_get_aux(&pt->handle); in pt_config()
488 if (!event->hw.config) { in pt_config()
495 if (!buf->single) in pt_config()
505 if (event->attr.config & BIT(0)) { in pt_config()
506 reg |= event->attr.config & RTIT_CTL_BRANCH_EN; in pt_config()
511 if (!event->attr.exclude_kernel) in pt_config()
513 if (!event->attr.exclude_user) in pt_config()
516 reg |= (event->attr.config & PT_CONFIG_MASK); in pt_config()
518 event->hw.config = reg; in pt_config()
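
pt_config() seeds RTIT_CTL from the event attributes: attr.config bit 0 acts as a passthrough flag (when set, BranchEn is taken from the user's config; when clear, it is forced on for compatibility with older userspace), and exclude_kernel/exclude_user map onto the OS/USR trace-enable bits. A stand-alone sketch of that seeding, with RTIT_CTL bit positions taken from the SDM rather than the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define RTIT_CTL_OS             (1ULL << 2)     /* trace ring 0 */
#define RTIT_CTL_USR            (1ULL << 3)     /* trace ring 3 */
#define RTIT_CTL_BRANCH_EN      (1ULL << 13)

static uint64_t seed_rtit_ctl(uint64_t config, int excl_kernel, int excl_user)
{
        uint64_t reg = 0;

        if (config & 1)                         /* passthrough mode */
                reg |= config & RTIT_CTL_BRANCH_EN;
        else                                    /* legacy: BranchEn always on */
                reg |= RTIT_CTL_BRANCH_EN;

        if (!excl_kernel)
                reg |= RTIT_CTL_OS;
        if (!excl_user)
                reg |= RTIT_CTL_USR;

        return reg;
}

int main(void)
{
        printf("reg = %#llx\n",
               (unsigned long long)seed_rtit_ctl(0, 0, 0));     /* -> 0x200c */
        return 0;
}
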
525 u64 ctl = READ_ONCE(event->hw.config); in pt_config_stop()
532 if (!READ_ONCE(pt->vmx_on)) in pt_config_stop()
535 WRITE_ONCE(event->hw.config, ctl); in pt_config_stop()
549 * struct topa - ToPA metadata
565 * Keep ToPA table-related metadata on the same page as the actual table,
570 ((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
573 * struct topa_page - page-sized ToPA table with metadata at the top
587 static inline struct topa_page *topa_entry_to_page(struct topa_entry *te) in topa_entry_to_page() argument
589 return (struct topa_page *)((unsigned long)te & PAGE_MASK); in topa_entry_to_page()
597 /* make -1 stand for the last table entry */
599 ((i) == -1 \
600 ? &topa_to_page(t)->table[(t)->last] \
601 : &topa_to_page(t)->table[(i)])
602 #define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
603 #define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
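
Because struct topa shares the page with the entry array, TENTS_PER_PAGE subtracts the metadata size before dividing, and the -1 index convention makes TOPA_ENTRY(t, -1) resolve to the table's last used slot. Back-of-the-envelope arithmetic under assumed sizes (the real values come from the kernel headers; sizeof(struct topa) below is hypothetical):

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096; /* 4 KiB pages                       */
        unsigned long topa_hdr  = 64;   /* hypothetical sizeof(struct topa)  */
        unsigned long entry_sz  = 8;    /* one ToPA entry is a single u64    */

        printf("TENTS_PER_PAGE = %lu\n",
               (page_size - topa_hdr) / entry_sz);              /* -> 504 */
        return 0;
}
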
609 void *base; in pt_config_buffer() local
611 if (buf->single) { in pt_config_buffer()
612 base = buf->data_pages[0]; in pt_config_buffer()
613 mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7; in pt_config_buffer()
615 base = topa_to_page(buf->cur)->table; in pt_config_buffer()
616 mask = (u64)buf->cur_idx; in pt_config_buffer()
619 reg = virt_to_phys(base); in pt_config_buffer()
620 if (pt->output_base != reg) { in pt_config_buffer()
621 pt->output_base = reg; in pt_config_buffer()
625 reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32); in pt_config_buffer()
626 if (pt->output_mask != reg) { in pt_config_buffer()
627 pt->output_mask = reg; in pt_config_buffer()
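
Everything the hardware needs lands in two registers: the output base (physical address of the buffer, or of the current ToPA table) and the mask/pointer register, whose low 7 bits are always set, with the table index (or, in single-range mode, the size mask) in bits 7..31 and the output offset in bits 32..63. A worked encode/decode matching the shifts above and the ones in pt_read_offset() further down; the buffer geometry is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t nr_pages   = 16;       /* hypothetical single-range buffer */
        uint64_t page_size  = 4096;
        uint64_t output_off = 0x1200;   /* hypothetical write offset        */

        uint64_t mask = (nr_pages * page_size - 1) >> 7;
        uint64_t reg  = 0x7f | (mask << 7) | (output_off << 32);

        /* decode, as pt_read_offset() does */
        printf("offset = %#llx, idx/mask = %#llx\n",
               (unsigned long long)(reg >> 32),
               (unsigned long long)((reg & 0xffffff80) >> 7));
        return 0;
}
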
633 * topa_alloc() - allocate page-sized ToPA table
650 tp->topa.last = 0; in topa_alloc()
653 * In case of single-entry ToPA, always put the self-referencing END in topa_alloc()
657 TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT; in topa_alloc()
658 TOPA_ENTRY(&tp->topa, 1)->end = 1; in topa_alloc()
661 return &tp->topa; in topa_alloc()
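
On hardware without multi-entry ToPA support, entry 1 points back at the table's own page with END set, so the single output region loops on itself forever. A sketch of that layout using a bitfield that mirrors the SDM's ToPA entry format (END at bit 0, INT at bit 2, STOP at bit 4, size in bits 9:6, base PFN from bit 12 up; the field names are mine):

#include <stdint.h>
#include <stdio.h>

struct fake_topa_entry {
        uint64_t end    : 1;
        uint64_t rsvd0  : 1;
        uint64_t intr   : 1;
        uint64_t rsvd1  : 1;
        uint64_t stop   : 1;
        uint64_t rsvd2  : 1;
        uint64_t size   : 4;
        uint64_t rsvd3  : 2;
        uint64_t base   : 36;
        uint64_t rsvd4  : 16;
};

int main(void)
{
        static struct fake_topa_entry table[2]; /* zero-initialized         */
        uint64_t table_pfn = 0x12345;   /* hypothetical PFN of the table    */

        table[0].base = 0x54321;        /* the one output region (made up)  */
        table[0].size = 0;              /* 4 KiB                            */
        table[1].base = table_pfn;      /* self-referencing END link        */
        table[1].end  = 1;

        printf("entry1: base=%#llx end=%u\n",
               (unsigned long long)table[1].base, (unsigned)table[1].end);
        return 0;
}
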
665 * topa_free() - free a page-sized ToPA table
674 * topa_insert_table() - insert a ToPA table into a buffer
684 struct topa *last = buf->last; in topa_insert_table()
686 list_add_tail(&topa->list, &buf->tables); in topa_insert_table()
688 if (!buf->first) { in topa_insert_table()
689 buf->first = buf->last = buf->cur = topa; in topa_insert_table()
693 topa->offset = last->offset + last->size; in topa_insert_table()
694 buf->last = topa; in topa_insert_table()
699 BUG_ON(last->last != TENTS_PER_PAGE - 1); in topa_insert_table()
701 TOPA_ENTRY(last, -1)->base = topa_pfn(topa); in topa_insert_table()
702 TOPA_ENTRY(last, -1)->end = 1; in topa_insert_table()
706 * topa_table_full() - check if a ToPA table is filled up
711 /* single-entry ToPA is a special case */ in topa_table_full()
713 return !!topa->last; in topa_table_full()
715 return topa->last == TENTS_PER_PAGE - 1; in topa_table_full()
719 * topa_insert_pages() - create a list of ToPA tables
730 struct topa *topa = buf->last; in topa_insert_pages()
734 p = virt_to_page(buf->data_pages[buf->nr_pages]); in topa_insert_pages()
741 return -ENOMEM; in topa_insert_pages()
746 if (topa->z_count == topa->last - 1) { in topa_insert_pages()
747 if (order == TOPA_ENTRY(topa, topa->last - 1)->size) in topa_insert_pages()
748 topa->z_count++; in topa_insert_pages()
751 TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT; in topa_insert_pages()
752 TOPA_ENTRY(topa, -1)->size = order; in topa_insert_pages()
753 if (!buf->snapshot && in topa_insert_pages()
755 TOPA_ENTRY(topa, -1)->intr = 1; in topa_insert_pages()
756 TOPA_ENTRY(topa, -1)->stop = 1; in topa_insert_pages()
759 topa->last++; in topa_insert_pages()
760 topa->size += sizes(order); in topa_insert_pages()
762 buf->nr_pages += 1ul << order; in topa_insert_pages()
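
z_count run-length-encodes a leading run of same-sized entries: a table whose first four entries all have the same order ends up with z_count == 3, which later lets pt_topa_entry_for_page() skip the whole run with one division instead of walking it. A small simulation of that bookkeeping (the per-entry orders are made up):

#include <stdio.h>

int main(void)
{
        int order[] = { 3, 3, 3, 3, 2, 1 };     /* hypothetical entry sizes */
        int z_count = 0;

        for (int last = 1; last < 6; last++) {
                /* mirrors the "z_count == last - 1, same order" test above */
                if (z_count == last - 1 && order[last] == order[last - 1])
                        z_count++;
        }
        printf("z_count = %d\n", z_count);  /* -> 3: entries 0..3 are a run */
        return 0;
}
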
768 * pt_topa_dump() - print ToPA tables and their entries
775 list_for_each_entry(topa, &buf->tables, list) { in pt_topa_dump()
779 pr_debug("# table @%p, off %llx size %zx\n", tp->table, in pt_topa_dump()
780 topa->offset, topa->size); in pt_topa_dump()
783 &tp->table[i], in pt_topa_dump()
784 (unsigned long)tp->table[i].base << TOPA_SHIFT, in pt_topa_dump()
785 sizes(tp->table[i].size), in pt_topa_dump()
786 tp->table[i].end ? 'E' : ' ', in pt_topa_dump()
787 tp->table[i].intr ? 'I' : ' ', in pt_topa_dump()
788 tp->table[i].stop ? 'S' : ' ', in pt_topa_dump()
789 *(u64 *)&tp->table[i]); in pt_topa_dump()
791 tp->table[i].stop) || in pt_topa_dump()
792 tp->table[i].end) in pt_topa_dump()
794 if (!i && topa->z_count) in pt_topa_dump()
795 i += topa->z_count; in pt_topa_dump()
801 * pt_buffer_advance() - advance to the next output region
808 buf->output_off = 0; in pt_buffer_advance()
809 buf->cur_idx++; in pt_buffer_advance()
811 if (buf->cur_idx == buf->cur->last) { in pt_buffer_advance()
812 if (buf->cur == buf->last) in pt_buffer_advance()
813 buf->cur = buf->first; in pt_buffer_advance()
815 buf->cur = list_entry(buf->cur->list.next, struct topa, in pt_buffer_advance()
817 buf->cur_idx = 0; in pt_buffer_advance()
822 * pt_update_head() - calculate current offsets and sizes
823 * @pt: Per-cpu pt context.
829 struct pt_buffer *buf = perf_get_aux(&pt->handle); in pt_update_head()
830 u64 topa_idx, base, old; in pt_update_head() local
832 if (buf->single) { in pt_update_head()
833 local_set(&buf->data_size, buf->output_off); in pt_update_head()
838 base = buf->cur->offset + buf->output_off; in pt_update_head()
841 for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++) in pt_update_head()
842 base += TOPA_ENTRY_SIZE(buf->cur, topa_idx); in pt_update_head()
844 if (buf->snapshot) { in pt_update_head()
845 local_set(&buf->data_size, base); in pt_update_head()
847 old = (local64_xchg(&buf->head, base) & in pt_update_head()
848 ((buf->nr_pages << PAGE_SHIFT) - 1)); in pt_update_head()
849 if (base < old) in pt_update_head()
850 base += buf->nr_pages << PAGE_SHIFT; in pt_update_head()
852 local_add(base - old, &buf->data_size); in pt_update_head()
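
The xchg on buf->head returns the previous head masked down to the buffer size, so a smaller new offset means the writer wrapped; adding the buffer size first makes the delta come out positive. A worked example with an 8-page (32 KiB) buffer:

#include <stdio.h>

int main(void)
{
        unsigned long bufsz = 8ul << 12;        /* nr_pages << PAGE_SHIFT */
        unsigned long old   = 0x7e00;           /* previous head (masked) */
        unsigned long base  = 0x0200;           /* new head, past the wrap */

        if (base < old)
                base += bufsz;
        printf("data_size += %#lx\n", base - old);      /* -> 0x400 */
        return 0;
}
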
857 * pt_buffer_region() - obtain current output region's address
862 return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT); in pt_buffer_region()
866 * pt_buffer_region_size() - obtain current output region's size
871 return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx); in pt_buffer_region_size()
875 * pt_handle_status() - take care of possible status conditions
876 * @pt: Per-cpu pt context.
880 struct pt_buffer *buf = perf_get_aux(&pt->handle); in pt_handle_status()
896 * On systems that only do single-entry ToPA, hitting STOP in pt_handle_status()
901 buf->output_off == pt_buffer_region_size(buf)) { in pt_handle_status()
902 perf_aux_output_flag(&pt->handle, in pt_handle_status()
909 * Also on single-entry ToPA implementations, interrupt will come in pt_handle_status()
913 !buf->snapshot && in pt_handle_status()
914 pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) { in pt_handle_status()
918 memset(head + buf->output_off, 0, in pt_handle_status()
919 pt_buffer_region_size(buf) - in pt_handle_status()
920 buf->output_off); in pt_handle_status()
931 * pt_read_offset() - translate registers into buffer pointers
941 if (!buf->single) { in pt_read_offset()
942 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); in pt_read_offset()
943 tp = phys_to_virt(pt->output_base); in pt_read_offset()
944 buf->cur = &tp->topa; in pt_read_offset()
947 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); in pt_read_offset()
949 buf->output_off = pt->output_mask >> 32; in pt_read_offset()
951 if (!buf->single) in pt_read_offset()
952 buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7; in pt_read_offset()
965 if (WARN_ON_ONCE(pg >= buf->nr_pages)) in pt_topa_entry_for_page()
972 list_for_each_entry(topa, &buf->tables, list) { in pt_topa_entry_for_page()
973 if (topa->offset + topa->size > pg << PAGE_SHIFT) in pt_topa_entry_for_page()
989 if (WARN_ON_ONCE(topa->last == -1)) in pt_topa_entry_for_page()
993 cur_pg = PFN_DOWN(topa->offset); in pt_topa_entry_for_page()
994 if (topa->z_count) { in pt_topa_entry_for_page()
995 z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1); in pt_topa_entry_for_page()
996 start_idx = topa->z_count + 1; in pt_topa_entry_for_page()
1004 idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0); in pt_topa_entry_for_page()
1005 return &tp->table[idx]; in pt_topa_entry_for_page()
1011 for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) { in pt_topa_entry_for_page()
1013 return &tp->table[idx]; in pt_topa_entry_for_page()
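
Inside the leading z_count run every entry spans the same number of pages, so the entry index is a single division; only past the run does the code fall back to stepping through entries one at a time. A worked lookup under assumed geometry:

#include <stdio.h>

int main(void)
{
        unsigned long pg = 21;          /* page being resolved (hypothetical) */
        unsigned long cur_pg = 0;       /* first page covered by this table   */
        int z_count = 3;                /* entries 0..3 share order 3         */
        unsigned long entry_pages = 1ul << 3;                   /* 8 pages  */
        unsigned long z_pg = entry_pages * (z_count + 1);       /* 32 pages */

        if (pg >= cur_pg && pg < cur_pg + z_pg)
                printf("entry idx = %lu\n",
                       (pg - cur_pg) / entry_pages);            /* -> 2 */
        return 0;
}
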
1027 pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te) in pt_topa_prev_entry() argument
1029 unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1); in pt_topa_prev_entry()
1034 if (tp->table != te) in pt_topa_prev_entry()
1035 return --te; in pt_topa_prev_entry()
1037 topa = &tp->topa; in pt_topa_prev_entry()
1038 if (topa == buf->first) in pt_topa_prev_entry()
1039 topa = buf->last; in pt_topa_prev_entry()
1045 return &tp->table[topa->last - 1]; in pt_topa_prev_entry()
1049 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
1055 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
1065 unsigned long head = local64_read(&buf->head); in pt_buffer_reset_markers()
1068 if (buf->single) in pt_buffer_reset_markers()
1072 if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) { in pt_buffer_reset_markers()
1074 return -EINVAL; in pt_buffer_reset_markers()
1083 if (buf->stop_te) { in pt_buffer_reset_markers()
1084 buf->stop_te->stop = 0; in pt_buffer_reset_markers()
1085 buf->stop_te->intr = 0; in pt_buffer_reset_markers()
1088 if (buf->intr_te) in pt_buffer_reset_markers()
1089 buf->intr_te->intr = 0; in pt_buffer_reset_markers()
1092 npages = handle->size >> PAGE_SHIFT; in pt_buffer_reset_markers()
1095 if (!offset_in_page(head + handle->size + 1)) in pt_buffer_reset_markers()
1099 idx &= buf->nr_pages - 1; in pt_buffer_reset_markers()
1101 if (idx != buf->stop_pos) { in pt_buffer_reset_markers()
1102 buf->stop_pos = idx; in pt_buffer_reset_markers()
1103 buf->stop_te = pt_topa_entry_for_page(buf, idx); in pt_buffer_reset_markers()
1104 buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te); in pt_buffer_reset_markers()
1107 wakeup = handle->wakeup >> PAGE_SHIFT; in pt_buffer_reset_markers()
1110 idx = (head >> PAGE_SHIFT) + npages - 1; in pt_buffer_reset_markers()
1114 idx &= buf->nr_pages - 1; in pt_buffer_reset_markers()
1115 if (idx != buf->intr_pos) { in pt_buffer_reset_markers()
1116 buf->intr_pos = idx; in pt_buffer_reset_markers()
1117 buf->intr_te = pt_topa_entry_for_page(buf, idx); in pt_buffer_reset_markers()
1118 buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te); in pt_buffer_reset_markers()
1121 buf->stop_te->stop = 1; in pt_buffer_reset_markers()
1122 buf->stop_te->intr = 1; in pt_buffer_reset_markers()
1123 buf->intr_te->intr = 1; in pt_buffer_reset_markers()
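
Putting numbers on the marker placement: STOP lands on the page just past the region perf handed out (one page further if the region ends exactly on a page boundary), and the interrupt page is the earlier of the wakeup page and the page before STOP; both indices then wrap modulo the buffer size. A worked example with a hypothetical 64-page buffer:

#include <stdio.h>

int main(void)
{
        unsigned long nr_pages = 64;
        unsigned long head = 10ul << 12;        /* head at page 10          */
        unsigned long size = 16ul << 12;        /* 16 pages handed out      */
        unsigned long wakeup = 20;              /* hypothetical wakeup page */

        unsigned long npages = size >> 12;
        if (((head + size + 1) & 0xfff) == 0)   /* ends on a page boundary  */
                npages++;

        unsigned long stop = ((head >> 12) + npages) & (nr_pages - 1);
        unsigned long intr = (head >> 12) + npages - 1;
        if (intr > wakeup)
                intr = wakeup;
        intr &= nr_pages - 1;

        printf("STOP page %lu, INT page %lu\n", stop, intr);    /* 26, 20 */
        return 0;
}
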
1129 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
1146 struct topa_entry *te; in pt_buffer_reset_offsets() local
1149 if (buf->snapshot) in pt_buffer_reset_offsets()
1150 head &= (buf->nr_pages << PAGE_SHIFT) - 1; in pt_buffer_reset_offsets()
1152 if (!buf->single) { in pt_buffer_reset_offsets()
1153 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); in pt_buffer_reset_offsets()
1154 te = pt_topa_entry_for_page(buf, pg); in pt_buffer_reset_offsets()
1156 cur_tp = topa_entry_to_page(te); in pt_buffer_reset_offsets()
1157 buf->cur = &cur_tp->topa; in pt_buffer_reset_offsets()
1158 buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0); in pt_buffer_reset_offsets()
1159 buf->output_off = head & (pt_buffer_region_size(buf) - 1); in pt_buffer_reset_offsets()
1161 buf->output_off = head; in pt_buffer_reset_offsets()
1164 local64_set(&buf->head, head); in pt_buffer_reset_offsets()
1165 local_set(&buf->data_size, 0); in pt_buffer_reset_offsets()
1169 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
1176 if (buf->single) in pt_buffer_fini_topa()
1179 list_for_each_entry_safe(topa, iter, &buf->tables, list) { in pt_buffer_fini_topa()
1189 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
1202 return -ENOMEM; in pt_buffer_init_topa()
1206 while (buf->nr_pages < nr_pages) { in pt_buffer_init_topa()
1210 return -ENOMEM; in pt_buffer_init_topa()
1216 TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first); in pt_buffer_init_topa()
1217 TOPA_ENTRY(buf->last, -1)->end = 1; in pt_buffer_init_topa()
1226 struct page *p = virt_to_page(buf->data_pages[0]); in pt_buffer_try_single()
1227 int ret = -ENOTSUPP, order = 0; in pt_buffer_try_single()
1235 if (!buf->snapshot) in pt_buffer_try_single()
1247 buf->single = true; in pt_buffer_try_single()
1248 buf->nr_pages = nr_pages; in pt_buffer_try_single()
1255 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
1256 * @cpu: Cpu on which to allocate, -1 means current.
1271 int node, ret, cpu = event->cpu; in pt_buffer_setup_aux()
1280 if (event->attr.aux_sample_size && !snapshot) in pt_buffer_setup_aux()
1283 if (cpu == -1) in pt_buffer_setup_aux()
1291 buf->snapshot = snapshot; in pt_buffer_setup_aux()
1292 buf->data_pages = pages; in pt_buffer_setup_aux()
1293 buf->stop_pos = -1; in pt_buffer_setup_aux()
1294 buf->intr_pos = -1; in pt_buffer_setup_aux()
1296 INIT_LIST_HEAD(&buf->tables); in pt_buffer_setup_aux()
1312 * pt_buffer_free_aux() - perf AUX deallocation path callback
1326 int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu); in pt_addr_filters_init()
1333 return -ENOMEM; in pt_addr_filters_init()
1335 if (event->parent) in pt_addr_filters_init()
1336 memcpy(filters, event->parent->hw.addr_filters, in pt_addr_filters_init()
1339 event->hw.addr_filters = filters; in pt_addr_filters_init()
1346 kfree(event->hw.addr_filters); in pt_addr_filters_fini()
1347 event->hw.addr_filters = NULL; in pt_addr_filters_fini()
1365 if (!filter->size || in pt_event_addr_filters_validate()
1366 filter->action == PERF_ADDR_FILTER_ACTION_START) in pt_event_addr_filters_validate()
1367 return -EOPNOTSUPP; in pt_event_addr_filters_validate()
1369 if (!filter->path.dentry) { in pt_event_addr_filters_validate()
1370 if (!valid_kernel_ip(filter->offset)) in pt_event_addr_filters_validate()
1371 return -EINVAL; in pt_event_addr_filters_validate()
1373 if (!valid_kernel_ip(filter->offset + filter->size)) in pt_event_addr_filters_validate()
1374 return -EINVAL; in pt_event_addr_filters_validate()
1378 return -EOPNOTSUPP; in pt_event_addr_filters_validate()
1388 struct perf_addr_filter_range *fr = event->addr_filter_ranges; in pt_event_addr_filters_sync()
1389 struct pt_filters *filters = event->hw.addr_filters; in pt_event_addr_filters_sync()
1396 list_for_each_entry(filter, &head->list, entry) { in pt_event_addr_filters_sync()
1397 if (filter->path.dentry && !fr[range].start) { in pt_event_addr_filters_sync()
1402 msr_b = msr_a + fr[range].size - 1; in pt_event_addr_filters_sync()
1405 filters->filter[range].msr_a = msr_a; in pt_event_addr_filters_sync()
1406 filters->filter[range].msr_b = msr_b; in pt_event_addr_filters_sync()
1407 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER) in pt_event_addr_filters_sync()
1408 filters->filter[range].config = 1; in pt_event_addr_filters_sync()
1410 filters->filter[range].config = 2; in pt_event_addr_filters_sync()
1414 filters->nr_filters = range; in pt_event_addr_filters_sync()
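
Each accepted filter becomes an ADDRn_A/ADDRn_B MSR pair bounding the range inclusively (msr_b = start + size - 1) plus a four-bit nibble in RTIT_CTL: 1 enables filtering to the range, 2 turns it into a TraceStop region. A sketch of that translation for range 0, whose config nibble sits at bits 35:32 of RTIT_CTL per the SDM (the address range below is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t start = 0x400000, size = 0x1000;       /* hypothetical range */
        int range = 0, is_filter = 1;   /* 1 = FILTER action, 0 = STOP action */

        uint64_t msr_a  = start;
        uint64_t msr_b  = start + size - 1;             /* inclusive end */
        uint64_t nibble = is_filter ? 1 : 2;
        uint64_t rtit_ctl = nibble << (32 + 4 * range); /* ADDR0 config bits */

        printf("ADDR%d_A=%#llx ADDR%d_B=%#llx RTIT_CTL|=%#llx\n",
               range, (unsigned long long)msr_a,
               range, (unsigned long long)msr_b,
               (unsigned long long)rtit_ctl);
        return 0;
}
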
1418 * intel_pt_interrupt() - PT PMI handler
1424 struct perf_event *event = pt->handle.event; in intel_pt_interrupt()
1429 * do anything (particularly, re-enable) for this event here. in intel_pt_interrupt()
1431 if (!READ_ONCE(pt->handle_nmi)) in intel_pt_interrupt()
1439 buf = perf_get_aux(&pt->handle); in intel_pt_interrupt()
1449 perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0)); in intel_pt_interrupt()
1451 if (!event->hw.state) { in intel_pt_interrupt()
1454 buf = perf_aux_output_begin(&pt->handle, event); in intel_pt_interrupt()
1456 event->hw.state = PERF_HES_STOPPED; in intel_pt_interrupt()
1460 pt_buffer_reset_offsets(buf, pt->handle.head); in intel_pt_interrupt()
1462 ret = pt_buffer_reset_markers(buf, &pt->handle); in intel_pt_interrupt()
1464 perf_aux_output_end(&pt->handle, 0); in intel_pt_interrupt()
1490 WRITE_ONCE(pt->vmx_on, on); in intel_pt_handle_vmx()
1496 event = pt->handle.event; in intel_pt_handle_vmx()
1498 perf_aux_output_flag(&pt->handle, in intel_pt_handle_vmx()
1503 wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config); in intel_pt_handle_vmx()
1515 struct hw_perf_event *hwc = &event->hw; in pt_event_start()
1519 buf = perf_aux_output_begin(&pt->handle, event); in pt_event_start()
1523 pt_buffer_reset_offsets(buf, pt->handle.head); in pt_event_start()
1524 if (!buf->snapshot) { in pt_event_start()
1525 if (pt_buffer_reset_markers(buf, &pt->handle)) in pt_event_start()
1529 WRITE_ONCE(pt->handle_nmi, 1); in pt_event_start()
1530 hwc->state = 0; in pt_event_start()
1538 perf_aux_output_end(&pt->handle, 0); in pt_event_start()
1540 hwc->state = PERF_HES_STOPPED; in pt_event_start()
1551 WRITE_ONCE(pt->handle_nmi, 0); in pt_event_stop()
1555 if (event->hw.state == PERF_HES_STOPPED) in pt_event_stop()
1558 event->hw.state = PERF_HES_STOPPED; in pt_event_stop()
1561 struct pt_buffer *buf = perf_get_aux(&pt->handle); in pt_event_stop()
1566 if (WARN_ON_ONCE(pt->handle.event != event)) in pt_event_stop()
1575 if (buf->snapshot) in pt_event_stop()
1576 pt->handle.head = in pt_event_stop()
1577 local_xchg(&buf->data_size, in pt_event_stop()
1578 buf->nr_pages << PAGE_SHIFT); in pt_event_stop()
1579 perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0)); in pt_event_stop()
1588 struct pt_buffer *buf = perf_get_aux(&pt->handle); in pt_event_snapshot_aux()
1599 if (WARN_ON_ONCE(!buf->snapshot)) in pt_event_snapshot_aux()
1605 if (READ_ONCE(pt->handle_nmi)) in pt_event_snapshot_aux()
1611 to = local_read(&buf->data_size); in pt_event_snapshot_aux()
1613 from = buf->nr_pages << PAGE_SHIFT; in pt_event_snapshot_aux()
1614 from += to - size; in pt_event_snapshot_aux()
1616 ret = perf_output_copy_aux(&pt->handle, handle, from, to); in pt_event_snapshot_aux()
1621 * preempted by anything that touches pt->handle_nmi. in pt_event_snapshot_aux()
1623 if (pt->handle_nmi) in pt_event_snapshot_aux()
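
The copy window ends at data_size and spans the requested sample size; when fewer bytes than requested sit below data_size, "from" starts a full buffer length higher so the window wraps through the end of the buffer. A worked example with a 32 KiB buffer, 0x800 bytes of fresh data and a 0x1000-byte sample:

#include <stdio.h>

int main(void)
{
        unsigned long bufsz = 8ul << 12;
        unsigned long to = 0x800, size = 0x1000;
        unsigned long from = 0;

        if (to < size)
                from = bufsz;           /* wrap into the tail of the buffer  */
        from += to - size;              /* unsigned arithmetic wraps cleanly */

        printf("copy [%#lx..end) then [0..%#lx)\n", from, to);  /* 0x7800, 0x800 */
        return 0;
}
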
1637 struct hw_perf_event *hwc = &event->hw; in pt_event_add()
1638 int ret = -EBUSY; in pt_event_add()
1640 if (pt->handle.event) in pt_event_add()
1645 ret = -EINVAL; in pt_event_add()
1646 if (hwc->state == PERF_HES_STOPPED) in pt_event_add()
1649 hwc->state = PERF_HES_STOPPED; in pt_event_add()
1670 if (event->attr.type != pt_pmu.pmu.type) in pt_event_init()
1671 return -ENOENT; in pt_event_init()
1674 return -EINVAL; in pt_event_init()
1677 return -EBUSY; in pt_event_init()
1681 return -ENOMEM; in pt_event_init()
1684 event->destroy = pt_event_destroy; in pt_event_init()
1693 if (pt->handle.event) in cpu_emergency_stop_pt()
1694 pt_event_stop(pt->handle.event, PERF_EF_UPDATE); in cpu_emergency_stop_pt()
1699 return event->pmu == &pt_pmu.pmu; in is_intel_pt_event()
1709 return -ENODEV; in pt_init()
1725 return -EBUSY; in pt_init()
1734 return -ENODEV; in pt_init()
1757 ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1); in pt_init()