/tools/perf/arch/x86/annotate/ |
D | instructions.c |
      3  { .name = "adc", .ops = &mov_ops, },
      4  { .name = "adcb", .ops = &mov_ops, },
      5  { .name = "adcl", .ops = &mov_ops, },
      6  { .name = "add", .ops = &mov_ops, },
      7  { .name = "addl", .ops = &mov_ops, },
      8  { .name = "addq", .ops = &mov_ops, },
      9  { .name = "addsd", .ops = &mov_ops, },
     10  { .name = "addw", .ops = &mov_ops, },
     11  { .name = "and", .ops = &mov_ops, },
     12  { .name = "andb", .ops = &mov_ops, },
    [all …]
|
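The x86 table above associates each mnemonic with a shared ins_ops implementation purely through static data: arithmetic instructions all point at the mov-style operand handler. A minimal, self-contained sketch of that data-driven association (the struct names mirror the perf code loosely; the lookup helper ins__find_name and mov_like_ops are hypothetical stand-ins, and perf itself keeps the table sorted per arch and searches it there):

#include <stdio.h>
#include <string.h>

struct ins_ops {
	int (*parse)(const char *raw);   /* simplified: perf's parse callbacks take more context */
};

struct ins {
	const char     *name;            /* instruction mnemonic */
	struct ins_ops *ops;             /* shared handler for this class of instruction */
};

static struct ins_ops mov_like_ops;      /* stand-in for perf's mov_ops */

/* Arithmetic mnemonics all reuse the mov-style operand parser/printer. */
static struct ins demo_instructions[] = {
	{ .name = "adc", .ops = &mov_like_ops, },
	{ .name = "add", .ops = &mov_like_ops, },
	{ .name = "and", .ops = &mov_like_ops, },
};

/* Hypothetical helper: linear lookup by mnemonic. */
static struct ins_ops *ins__find_name(const char *name)
{
	for (size_t i = 0; i < sizeof(demo_instructions) / sizeof(demo_instructions[0]); i++)
		if (!strcmp(demo_instructions[i].name, name))
			return demo_instructions[i].ops;
	return NULL;
}

int main(void)
{
	printf("add handled: %s\n", ins__find_name("add") ? "yes" : "no");
	return 0;
}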
/tools/perf/arch/s390/annotate/ |
D | instructions.c |
      4  static int s390_call__parse(struct arch *arch, struct ins_operands *ops,  in s390_call__parse() argument
     13  tok = strchr(ops->raw, ',');  in s390_call__parse()
     17  ops->target.addr = strtoull(tok + 1, &endptr, 16);  in s390_call__parse()
     34  ops->target.name = strdup(name);  in s390_call__parse()
     37  if (ops->target.name == NULL)  in s390_call__parse()
     39  target.addr = map__objdump_2mem(map, ops->target.addr);  in s390_call__parse()
     42  map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)  in s390_call__parse()
     43  ops->target.sym = target.ms.sym;  in s390_call__parse()
     49  struct ins_operands *ops, int max_ins_name);
     57  struct ins_operands *ops,  in s390_mov__parse() argument
    [all …]
|
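s390_call__parse above pulls the branch target out of the raw operand string: find the comma, parse the hex address that follows it, and keep a printable symbol name when one is present. A self-contained sketch of that split-and-parse step, assuming an operand of the form "%r14,0xADDR <symbol>" (the struct and function names here are simplified stand-ins, not the perf API):

#define _GNU_SOURCE          /* for strndup() on older glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct target {
	char               *name;   /* symbolic name, e.g. "printf", may stay NULL */
	unsigned long long  addr;   /* numeric target address                      */
};

/* Returns 0 on success, -1 if the operand has no comma or no valid hex number. */
static int call_operand__parse(const char *raw, struct target *t)
{
	const char *comma = strchr(raw, ',');
	const char *lt, *gt;
	char *endptr;

	if (!comma)
		return -1;

	/* The hex target address follows the comma. */
	t->addr = strtoull(comma + 1, &endptr, 16);
	if (endptr == comma + 1)
		return -1;

	/* An optional "<symbol>" suffix gives the printable name. */
	lt = strchr(endptr, '<');
	gt = lt ? strchr(lt, '>') : NULL;
	t->name = (lt && gt && gt > lt + 1) ? strndup(lt + 1, gt - lt - 1) : NULL;
	return 0;
}

int main(void)
{
	struct target t = { 0 };

	if (!call_operand__parse("%r14,0x2aa00011d30 <printf>", &t)) {
		printf("addr=%#llx name=%s\n", t.addr, t.name ? t.name : "?");
		free(t.name);
	}
	return 0;
}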
/tools/perf/arch/arm64/annotate/ |
D | instructions.c |
     13  struct ins_operands *ops,  in arm64_mov__parse() argument
     16  char *s = strchr(ops->raw, ','), *target, *endptr;  in arm64_mov__parse()
     22  ops->source.raw = strdup(ops->raw);  in arm64_mov__parse()
     25  if (ops->source.raw == NULL)  in arm64_mov__parse()
     29  ops->target.raw = strdup(target);  in arm64_mov__parse()
     30  if (ops->target.raw == NULL)  in arm64_mov__parse()
     33  ops->target.addr = strtoull(target, &endptr, 16);  in arm64_mov__parse()
     46  ops->target.name = strdup(s);  in arm64_mov__parse()
     49  if (ops->target.name == NULL)  in arm64_mov__parse()
     55  zfree(&ops->target.raw);  in arm64_mov__parse()
    [all …]
|
/tools/perf/arch/csky/annotate/ |
D | instructions.c |
      9  struct ins_ops *ops = NULL;  in csky__associate_ins_ops() local
     24  ops = &jump_ops;  in csky__associate_ins_ops()
     30  ops = &call_ops;  in csky__associate_ins_ops()
     34  ops = &ret_ops;  in csky__associate_ins_ops()
     36  if (ops)  in csky__associate_ins_ops()
     37  arch__associate_ins_ops(arch, name, ops);  in csky__associate_ins_ops()
     38  return ops;  in csky__associate_ins_ops()
|
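For architectures without a full instruction table, perf lets the arch supply a callback that classifies an instruction by its name and registers the matching ops on the fly, as the csky hook above does. A minimal sketch of that shape (the mnemonics, the ops objects, and the associate() helper are illustrative stand-ins for perf's jump_ops/call_ops/ret_ops and arch__associate_ins_ops(), which caches the mapping rather than printing it):

#include <stdio.h>
#include <string.h>

struct ins_ops { const char *kind; };          /* stand-in for perf's ins_ops */

static struct ins_ops jump_ops = { "jump" };
static struct ins_ops call_ops = { "call" };
static struct ins_ops ret_ops  = { "ret"  };

/* Stand-in for arch__associate_ins_ops(): perf caches the mapping, here we just report it. */
static void associate(const char *name, struct ins_ops *ops)
{
	printf("%-8s -> %s\n", name, ops->kind);
}

/* Classify by mnemonic: returns, branch-and-link calls, then plain branches. */
static struct ins_ops *demo__associate_ins_ops(const char *name)
{
	struct ins_ops *ops = NULL;

	if (!strcmp(name, "rts"))                    /* return               */
		ops = &ret_ops;
	else if (!strncmp(name, "bsr", 3) ||         /* branch to subroutine */
		 !strncmp(name, "jsr", 3))
		ops = &call_ops;
	else if (name[0] == 'b' || name[0] == 'j')   /* other branches/jumps */
		ops = &jump_ops;

	if (ops)
		associate(name, ops);
	return ops;                                  /* NULL means "nothing special" */
}

int main(void)
{
	demo__associate_ins_ops("bsr");
	demo__associate_ins_ops("bt");
	demo__associate_ins_ops("rts");
	demo__associate_ins_ops("addu");
	return 0;
}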
/tools/usb/usbip/libsrc/ |
D | usbip_host_common.h |
     43  struct usbip_host_driver_ops ops;  member
     57  if (!hdriver->ops.open)  in usbip_driver_open()
     59  return hdriver->ops.open(hdriver);  in usbip_driver_open()
     64  if (!hdriver->ops.close)  in usbip_driver_close()
     66  hdriver->ops.close(hdriver);  in usbip_driver_close()
     71  if (!hdriver->ops.refresh_device_list)  in usbip_refresh_device_list()
     73  return hdriver->ops.refresh_device_list(hdriver);  in usbip_refresh_device_list()
     79  if (!hdriver->ops.get_device)  in usbip_get_device()
     81  return hdriver->ops.get_device(hdriver, num);  in usbip_get_device()
|
D | usbip_host_common.c |
     82  if (hdriver->ops.read_device(edev->sudev, &edev->udev) < 0)  in usbip_exported_device_new()
    103  if (!hdriver->ops.read_interface)  in usbip_exported_device_new()
    105  hdriver->ops.read_interface(&edev->udev, i, &edev->uinf[i]);  in usbip_exported_device_new()
    140  if (hdriver->ops.is_my_device(dev)) {  in refresh_exported_devices()
|
D | usbip_host_driver.c | 44 .ops = {
|
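The usbip wrappers above check every driver callback for NULL before dispatching through the embedded ops member, so a backend may leave optional hooks unset without crashing the caller. A compact sketch of that guarded-dispatch idiom (driver and callback names here are illustrative, not the usbip API):

#include <stdio.h>

struct host_driver;

struct host_driver_ops {
	int  (*open)(struct host_driver *hd);     /* optional */
	void (*close)(struct host_driver *hd);    /* optional */
};

struct host_driver {
	const char             *name;
	struct host_driver_ops  ops;              /* embedded, like usbip_host_driver_ops */
};

/* Guarded wrappers: a missing callback is an error or a no-op, never a crash. */
static int driver_open(struct host_driver *hd)
{
	if (!hd->ops.open)
		return -1;
	return hd->ops.open(hd);
}

static void driver_close(struct host_driver *hd)
{
	if (!hd->ops.close)
		return;
	hd->ops.close(hd);
}

/* Example backend that only implements open(). */
static int demo_open(struct host_driver *hd)
{
	printf("%s: opened\n", hd->name);
	return 0;
}

static struct host_driver demo_driver = {
	.name = "demo",
	.ops  = { .open = demo_open },
};

int main(void)
{
	driver_open(&demo_driver);
	driver_close(&demo_driver);   /* close is unset: silently skipped */
	return 0;
}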
/tools/perf/arch/sparc/annotate/ |
D | instructions.c |
    122  struct ins_ops *ops = NULL;  in sparc__associate_instruction_ops() local
    127  ops = &call_ops;  in sparc__associate_instruction_ops()
    131  ops = &ret_ops;  in sparc__associate_instruction_ops()
    133  ops = &mov_ops;  in sparc__associate_instruction_ops()
    144  ops = &jump_ops;  in sparc__associate_instruction_ops()
    146  ops = &jump_ops;  in sparc__associate_instruction_ops()
    150  ops = &jump_ops;  in sparc__associate_instruction_ops()
    154  if (ops)  in sparc__associate_instruction_ops()
    155  arch__associate_ins_ops(arch, name, ops);  in sparc__associate_instruction_ops()
    157  return ops;  in sparc__associate_instruction_ops()
|
/tools/perf/arch/powerpc/annotate/ |
D | instructions.c |
      7  struct ins_ops *ops;  in powerpc__associate_instruction_ops() local
     19  ops = &jump_ops;  in powerpc__associate_instruction_ops()
     39  ops = &call_ops;  in powerpc__associate_instruction_ops()
     46  ops = &ret_ops;  in powerpc__associate_instruction_ops()
     48  arch__associate_ins_ops(arch, name, ops);  in powerpc__associate_instruction_ops()
     49  return ops;  in powerpc__associate_instruction_ops()
|
/tools/perf/util/ |
D | annotate.c |
    131  static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)  in arch__associate_ins_ops() argument
    144  ins->ops = ops;  in arch__associate_ins_ops()
    207  static void ins__delete(struct ins_operands *ops)  in ins__delete() argument
    209  if (ops == NULL)  in ins__delete()
    211  zfree(&ops->source.raw);  in ins__delete()
    212  zfree(&ops->source.name);  in ins__delete()
    213  zfree(&ops->target.raw);  in ins__delete()
    214  zfree(&ops->target.name);  in ins__delete()
    218  struct ins_operands *ops, int max_ins_name)  in ins__raw_scnprintf() argument
    220  return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);  in ins__raw_scnprintf()
    [all …]
|
D | unwind-libunwind.c |
     15  static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)  in unwind__register_ops() argument
     17  maps->unwind_libunwind_ops = ops;  in unwind__register_ops()
     24  struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;  in unwind__prepare_access() local
     50  ops = x86_32_unwind_libunwind_ops;  in unwind__prepare_access()
     53  ops = arm64_unwind_libunwind_ops;  in unwind__prepare_access()
     56  if (!ops) {  in unwind__prepare_access()
     61  unwind__register_ops(maps, ops);  in unwind__prepare_access()
|
D | annotate.h |
     29  struct ins_ops *ops;  member
     53  struct ins_operands *ops;  member
     61  void (*free)(struct ins_operands *ops);
     62  int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
     64  struct ins_operands *ops, int max_ins_name);
     71  int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops, int max_ins_na…
    153  struct ins_operands ops;  member
    191  return dl->ops.target.offset_avail && !dl->ops.target.outside;  in disasm_line__has_local_offset()
    216  struct annotation_write_ops *ops,
|
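In util/annotate.c the ops member behaves like a small vtable: each ins_ops provides optional free/parse/scnprintf callbacks, and printing falls back to the raw operand text when no specialised scnprintf is registered (the "%-*s %s" format on line 220 above). A simplified, self-contained sketch of that dispatch-with-fallback (the layout follows the perf headers loosely, and plain snprintf stands in for the tools scnprintf helper):

#include <stdio.h>
#include <string.h>

struct ins_operands {
	char *raw;                        /* operand text straight from objdump */
};

struct ins;

struct ins_ops {
	int (*scnprintf)(struct ins *ins, char *bf, size_t size,
			 struct ins_operands *ops, int max_ins_name);
};

struct ins {
	const char     *name;
	struct ins_ops *ops;
};

/* Default: "<mnemonic> <raw operands>", mnemonic padded to max_ins_name columns. */
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
			      struct ins_operands *ops, int max_ins_name)
{
	return snprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}

/* Dispatch through the per-instruction ops, falling back to the raw printer. */
static int ins__scnprintf(struct ins *ins, char *bf, size_t size,
			  struct ins_operands *ops, int max_ins_name)
{
	if (ins->ops && ins->ops->scnprintf)
		return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
	return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
}

int main(void)
{
	struct ins mov = { .name = "mov", .ops = NULL };   /* no custom printer */
	struct ins_operands operands = { .raw = "%rdi,%rax" };
	char buf[64];

	ins__scnprintf(&mov, buf, sizeof(buf), &operands, 6);
	printf("%s\n", buf);
	return 0;
}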
/tools/perf/arch/powerpc/util/ |
D | skip-callchain-idx.c |
     50  Dwarf_Op *ops = &dummy;  in check_return_reg() local
     54  result = dwarf_frame_register(frame, ra_regno, ops_mem, &ops, &nops);  in check_return_reg()
     65  if ((nops != 0 || ops != NULL) &&  in check_return_reg()
     66  !(nops == 1 && ops[0].atom == DW_OP_regx &&  in check_return_reg()
     67  ops[0].number2 == 0 && ops[0].offset == 0))  in check_return_reg()
     74  result = dwarf_frame_cfa(frame, &ops, &nops);  in check_return_reg()
     84  if (nops == 1 && ops[0].atom == DW_OP_bregx && ops[0].number == 1 &&  in check_return_reg()
     85  ops[0].number2 == 0)  in check_return_reg()
|
D | kvm-stat.c |
    101  { .name = "vmexit", .ops = &exit_events },
    102  { .name = "hcall", .ops = &hcall_events },
|
/tools/testing/nvdimm/test/ |
D | iomap.c |
     45  struct iomap_ops *ops;  in __get_nfit_res() local
     47  ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);  in __get_nfit_res()
     48  if (ops)  in __get_nfit_res()
     49  return ops->nfit_test_lookup(resource);  in __get_nfit_res()
    105  if (pgmap->ops && pgmap->ops->kill)  in nfit_test_kill()
    106  pgmap->ops->kill(pgmap);  in nfit_test_kill()
    110  if (pgmap->ops && pgmap->ops->cleanup) {  in nfit_test_kill()
    111  pgmap->ops->cleanup(pgmap);  in nfit_test_kill()
    136  if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))  in __wrap_devm_memremap_pages()
    146  if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {  in __wrap_devm_memremap_pages()
    [all …]
|
/tools/perf/arch/arm/annotate/ |
D | instructions.c |
     16  struct ins_ops *ops;  in arm__associate_instruction_ops() local
     20  ops = &call_ops;  in arm__associate_instruction_ops()
     22  ops = &jump_ops;  in arm__associate_instruction_ops()
     26  arch__associate_ins_ops(arch, name, ops);  in arm__associate_instruction_ops()
     27  return ops;  in arm__associate_instruction_ops()
|
/tools/lib/perf/ |
D | evlist.c |
    419  mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,  in mmap_per_evsel() argument
    439  map = ops->get(evlist, overwrite, idx);  in mmap_per_evsel()
    471  if (ops->mmap(map, mp, *output, evlist_cpu) < 0)  in mmap_per_evsel()
    503  mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,  in mmap_per_thread() argument
    513  if (ops->idx)  in mmap_per_thread()
    514  ops->idx(evlist, mp, thread, false);  in mmap_per_thread()
    516  if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,  in mmap_per_thread()
    529  mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,  in mmap_per_cpu() argument
    540  if (ops->idx)  in mmap_per_cpu()
    541  ops->idx(evlist, mp, cpu, true);  in mmap_per_cpu()
    [all …]
|
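In libperf's evlist.c the mmap path is parameterised by a perf_evlist_mmap_ops bundle: an optional idx() notification, a get() that returns the map slot for an index, and the mmap() operation itself, invoked once per CPU or per thread. A stripped-down sketch of passing such an ops bundle through a per-index loop (every type and name here is illustrative, not the libperf ABI):

#include <stdio.h>

struct demo_map { int fd; };

struct demo_mmap_ops {
	void             (*idx)(int idx, int per_cpu);             /* optional hook  */
	struct demo_map *(*get)(int idx);                          /* pick the slot  */
	int              (*mmap)(struct demo_map *map, int idx);   /* do the work    */
};

static struct demo_map slots[4];

static void demo_idx(int idx, int per_cpu)
{
	printf("preparing %s %d\n", per_cpu ? "cpu" : "thread", idx);
}

static struct demo_map *demo_get(int idx)                 { return &slots[idx]; }
static int demo_mmap(struct demo_map *map, int idx)       { map->fd = idx; return 0; }

/* Mirror of the mmap_per_cpu() shape: call the optional idx(), then get() + mmap(). */
static int mmap_per_index(struct demo_mmap_ops *ops, int nr, int per_cpu)
{
	for (int idx = 0; idx < nr; idx++) {
		struct demo_map *map;

		if (ops->idx)
			ops->idx(idx, per_cpu);

		map = ops->get(idx);
		if (!map || ops->mmap(map, idx) < 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_mmap_ops ops = {
		.idx  = demo_idx,
		.get  = demo_get,
		.mmap = demo_mmap,
	};

	return mmap_per_index(&ops, 4, 1);
}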
/tools/perf/bench/ |
D | futex-hash.c |
     50  unsigned long ops;  member
     72  unsigned long ops = w->ops; /* avoid cacheline bouncing */  in workerfn() local
     82  for (i = 0; i < nfutexes; i++, ops++) {  in workerfn()
     96  w->ops = ops;  in workerfn()
    209  worker[i].ops / bench__runtime.tv_sec : 0;  in bench_futex_hash()
|
D | futex-lock-pi.c |
     30  unsigned long ops;  member
     82  unsigned long ops = w->ops;  in workerfn() local
    111  ops++; /* account for thread's share of work */  in workerfn()
    114  w->ops = ops;  in workerfn()
    215  worker[i].ops / bench__runtime.tv_sec : 0;  in bench_futex_lock_pi()
|
D | epoll-wait.c |
    122  unsigned long ops;  member
    187  unsigned long ops = w->ops;  in workerfn() local
    230  ops++;  in workerfn()
    236  w->ops = ops;  in workerfn()
    523  worker[i].ops / bench__runtime.tv_sec : 0;  in bench_epoll_wait()
|
D | epoll-ctl.c |
     70  unsigned long ops[EPOLL_NR_OPS];  member
    157  w->ops[op]++;  in do_epoll_op()
    393  t[j] = worker[i].ops[j];  in bench_epoll_ctl()
|
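The futex and epoll benchmarks above share one accounting pattern: each worker bumps a thread-local copy of its ops counter inside the hot loop (avoiding cache-line bouncing on the shared worker array), stores it back exactly once after the loop, and the harness divides the sum by the runtime to report ops/sec. A self-contained pthread sketch of that pattern; the work itself and the fixed runtime are placeholders, not the real benchmark logic:

/* build: cc -pthread demo_ops_counter.c */
#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4
#define NR_LOOPS   1000000UL

struct worker {
	pthread_t     thread;
	unsigned long ops;           /* published once, after the loop */
};

static struct worker workers[NR_WORKERS];

static void *workerfn(void *arg)
{
	struct worker *w = arg;
	unsigned long ops = w->ops;  /* local copy: avoid cacheline bouncing */

	for (unsigned long i = 0; i < NR_LOOPS; i++)
		ops++;               /* stand-in for the real futex/epoll operation */

	w->ops = ops;                /* write back this thread's share of work */
	return NULL;
}

int main(void)
{
	unsigned long total = 0;
	double runtime_sec = 1.0;    /* the real benches measure elapsed time */

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_create(&workers[i].thread, NULL, workerfn, &workers[i]);
	for (int i = 0; i < NR_WORKERS; i++) {
		pthread_join(workers[i].thread, NULL);
		total += workers[i].ops;
	}

	printf("%lu ops, %.0f ops/sec\n", total, total / runtime_sec);
	return 0;
}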
/tools/perf/arch/x86/util/ |
D | kvm-stat.c |
    145  { .name = "vmexit", .ops = &exit_events },
    146  { .name = "mmio", .ops = &mmio_events },
    147  { .name = "ioport", .ops = &ioport_events },
|
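The kvm-stat entries on x86, powerpc and s390 all expose the same structure: a small table mapping an event name ("vmexit", "mmio", "hcall", …) to the kvm_events_ops that knows how to decode and aggregate that class of event. A tiny sketch of looking an ops set up by name (the sentinel-terminated table and the lookup helper are illustrative, not the perf kvm API):

#include <stdio.h>
#include <string.h>

struct kvm_events_ops { const char *what; };   /* decode/aggregate hooks live here in perf */

static struct kvm_events_ops exit_events = { "vmexit reasons" };
static struct kvm_events_ops mmio_events = { "mmio accesses"  };

struct reg_events {
	const char            *name;   /* what the user asks to report on */
	struct kvm_events_ops *ops;
};

static struct reg_events events_table[] = {
	{ .name = "vmexit", .ops = &exit_events },
	{ .name = "mmio",   .ops = &mmio_events },
	{ .name = NULL },                           /* sentinel */
};

static struct kvm_events_ops *events__find(const char *name)
{
	for (struct reg_events *r = events_table; r->name; r++)
		if (!strcmp(r->name, name))
			return r->ops;
	return NULL;
}

int main(void)
{
	struct kvm_events_ops *ops = events__find("mmio");

	printf("mmio -> %s\n", ops ? ops->what : "unknown");
	return 0;
}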
/tools/perf/ui/browsers/ |
D | annotate.c |
    102  struct annotation_write_ops ops = {  in annotate_browser__write() local
    120  ops.width += 1;  in annotate_browser__write()
    122  annotation_line__write(al, notes, &ops, ab->opts);  in annotate_browser__write()
    124  if (ops.current_entry)  in annotate_browser__write()
    137  name = pos->ops.locked.ins.name;  in is_fused()
    186  target = notes->offsets[cursor->ops.target.offset];  in annotate_browser__draw_current_jump()
    189  cursor->ops.target.offset);  in annotate_browser__draw_current_jump()
    418  if (!dl->ops.target.sym) {  in annotate_browser__callq()
    423  notes = symbol__annotation(dl->ops.target.sym);  in annotate_browser__callq()
    426  if (!symbol__hists(dl->ops.target.sym, evsel->evlist->core.nr_entries)) {  in annotate_browser__callq()
    [all …]
|
/tools/perf/Documentation/ |
D | perf-bench.txt |
     35  170792 ops/sec
    133  123581 ops/sec
    140  59004 ops/sec
    146  Suite for evaluating performance of core system call throughput (both usecs/op and ops/sec metrics).
|
/tools/perf/arch/s390/util/ |
D | kvm-stat.c | 93 { .name = "vmexit", .ops = &exit_events },
|