| /tools/perf/util/ |
| D | cputopo.c |
|      35  static int build_cpu_topology(struct cpu_topology *tp, int cpu)
|      64  for (i = 0; i < tp->package_cpus_lists; i++) {
|      65  if (!strcmp(buf, tp->package_cpus_list[i]))
|      68  if (i == tp->package_cpus_lists) {
|      69  tp->package_cpus_list[i] = buf;
|      70  tp->package_cpus_lists++;
|      77  if (!tp->die_cpus_list)
|      95  for (i = 0; i < tp->die_cpus_lists; i++) {
|      96  if (!strcmp(buf, tp->die_cpus_list[i]))
|      99  if (i == tp->die_cpus_lists) {
|      [all …]
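
A self-contained analogue of the dedup loop shown above (cputopo.c:64-70): each CPU's sysfs cpu-list string is compared against the lists already collected and appended only if unseen. The fixed bound and inputs here are illustrative, not perf's.

    #include <stdio.h>
    #include <string.h>

    #define MAX_LISTS 8

    static const char *package_cpus_list[MAX_LISTS];
    static int package_cpus_lists;

    /* Mirrors build_cpu_topology(): scan the strings recorded so far
     * and append buf only when no earlier entry matches. */
    static void add_package_list(const char *buf)
    {
        int i;

        for (i = 0; i < package_cpus_lists; i++)
            if (!strcmp(buf, package_cpus_list[i]))
                break;
        if (i == package_cpus_lists && i < MAX_LISTS) {
            package_cpus_list[i] = buf;
            package_cpus_lists++;
        }
    }

    int main(void)
    {
        add_package_list("0-3");
        add_package_list("0-3");    /* duplicate: ignored */
        add_package_list("4-7");
        printf("%d unique lists\n", package_cpus_lists);    /* prints 2 */
        return 0;
    }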
|
| D | cputopo.h |
|      65  void cpu_topology__delete(struct cpu_topology *tp);
|      73  void numa_topology__delete(struct numa_topology *tp);
|      76  void hybrid_topology__delete(struct hybrid_topology *tp);
|
| D | probe-event.c |
|     656  static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
|     662  u64 addr = tp->address;
|     671  ret = get_text_start_address(tp->module, &stext, NULL);
|     675  } else if (tp->symbol) {
|     677  ret = kernel_get_symbol_address_by_name(tp->symbol, &addr,
|     678  false, !!tp->module);
|     681  addr += tp->offset;
|     685  tp->module ? : "kernel");
|     687  dinfo = debuginfo_cache__open(tp->module, verbose <= 0);
|     694  pp->retprobe = tp->retprobe;
|     [all …]
|
| D | event.c | in perf_event__fprintf_text_poke():
|     525  struct perf_record_text_poke_event *tp = &event->text_poke;
|     529  ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);
|     534  al.map = maps__find(machine__kernel_maps(machine), tp->addr);
|     536  al.addr = map__map_ip(al.map, tp->addr);
|     543  ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);
|     545  ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,
|     548  ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,
|
| D | header.c | in write_cpu_topology():
|     588  struct cpu_topology *tp;
|     592  tp = cpu_topology__new();
|     593  if (!tp)
|     596  ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));
|     600  for (i = 0; i < tp->package_cpus_lists; i++) {
|     601  ret = do_write_string(ff, tp->package_cpus_list[i]);
|     605  ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));
|     609  for (i = 0; i < tp->core_cpus_lists; i++) {
|     610  ret = do_write_string(ff, tp->core_cpus_list[i]);
|     630  if (!tp->die_cpus_lists)
|     [all …]
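
write_cpu_topology() serializes each topology list as a count followed by its strings, through perf's do_write()/do_write_string(). A simplified, self-contained analogue of that framing; perf's actual perf.data layout (including string length and padding handling) differs, so this is illustrative only.

    #include <stdio.h>
    #include <string.h>

    /* Emit a u32 count, then each string with its terminating NUL. */
    static int write_string_block(FILE *fp, const char **list, unsigned int n)
    {
        unsigned int i;

        if (fwrite(&n, sizeof(n), 1, fp) != 1)
            return -1;
        for (i = 0; i < n; i++)
            if (fwrite(list[i], strlen(list[i]) + 1, 1, fp) != 1)
                return -1;
        return 0;
    }

    int main(void)
    {
        const char *pkgs[] = { "0-3", "4-7" };
        FILE *fp = fopen("topo.bin", "wb");

        if (!fp)
            return 1;
        write_string_block(fp, pkgs, 2);
        fclose(fp);
        return 0;
    }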
|
| /tools/testing/selftests/bpf/progs/ |
| D | bpf_cc_cubic.c |
|      in tcp_update_pacing_rate():
|      49  const struct tcp_sock *tp = tcp_sk(sk);
|      53  rate = (__u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
|      63  if (tp->snd_cwnd < tp->snd_ssthresh / 2)
|      68  rate *= max(tp->snd_cwnd, tp->packets_out);
|      70  if (tp->srtt_us)
|      71  rate = div64_u64(rate, (__u64)tp->srtt_us);
|      in tcp_cwnd_reduction():
|      79  struct tcp_sock *tp = tcp_sk(sk);
|      81  __u32 pkts_in_flight = tp->packets_out - (tp->sacked_out + tp->lost_out) + tp->retrans_out;
|      82  int delta = tp->snd_ssthresh - pkts_in_flight;
|      84  if (newly_acked_sacked <= 0 || !tp->prior_cwnd)
|      [all …]
|
| D | tcp_ca_write_sk_pacing.c |
|      13  static unsigned int tcp_left_out(const struct tcp_sock *tp)
|      15  return tp->sacked_out + tp->lost_out;
|      18  static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
|      20  return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
|      in BPF_PROG():
|      38  struct tcp_sock *tp = tcp_sk(sk);
|      40  ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
|      41  (tp->srtt_us ?: 1U << 3);
|      43  tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
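
The expression at lines 40-41 computes the pacing rate in bytes per second. tcp_sock keeps srtt_us left-shifted by 3 (units of usec/8), so the numerator is shifted by 3 as well to cancel the scaling, and a zero RTT falls back to 1 << 3. A self-contained sketch of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define USEC_PER_SEC 1000000ULL

    /* rate = cwnd * mss / srtt, with srtt stored <<3 in usec units. */
    static uint64_t pacing_rate(uint64_t snd_cwnd, uint64_t mss_cache,
                                uint32_t srtt_us)
    {
        return ((snd_cwnd * mss_cache * USEC_PER_SEC) << 3) /
               (srtt_us ?: 1U << 3);
    }

    int main(void)
    {
        /* 10 packets of 1448 bytes, 20 ms smoothed RTT (stored <<3):
         * 14480 bytes / 0.02 s = 724000 bytes/sec. */
        printf("%llu bytes/sec\n",
               (unsigned long long)pacing_rate(10, 1448, 20000 << 3));
        return 0;
    }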
|
| D | bpf_dctcp.c |
|      59  static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
|      61  ca->next_seq = tp->snd_nxt;
|      63  ca->old_delivered = tp->delivered;
|      64  ca->old_delivered_ce = tp->delivered_ce;
|      in BPF_PROG():
|      70  const struct tcp_sock *tp = tcp_sk(sk);
|      74  if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) {
|     101  ca->prior_rcv_nxt = tp->rcv_nxt;
|     106  stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
|     109  bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
|     111  dctcp_reset(tp, ca);
|     [all …]
|
| D | bpf_iter_setsockopt.c |
|      11  tp = NULL; \
|      13  tp = bpf_skc_to_tcp_sock(_skc); \
|      14  sk = (struct sock *)tp; \
|      16  tp; \
|      in change_tcp_cc():
|      43  struct tcp_sock *tp;
|      57  if (bpf_getsockopt(tp, SOL_TCP, TCP_CONGESTION,
|      67  bpf_setsockopt(tp, SOL_TCP, TCP_CONGESTION, dctcp_cc, sizeof(dctcp_cc));
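
change_tcp_cc() runs from a BPF TCP socket iterator: bpf_skc_to_tcp_sock() upgrades the iterator's generic sock_common pointer into a tcp_sock the verifier accepts for bpf_getsockopt()/bpf_setsockopt(). A minimal sketch of that pattern, assuming a vmlinux.h generated by bpftool; SOL_TCP and TCP_CONGESTION are the standard uapi values, redefined locally the way the selftests do.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    #define SOL_TCP         6    /* uapi values, as in bpf_tracing_net.h */
    #define TCP_CONGESTION  13

    char cur_cc[16];
    char dctcp_cc[] = "dctcp";

    SEC("iter/tcp")
    int change_cc_demo(struct bpf_iter__tcp *ctx)
    {
        struct sock_common *skc = ctx->sk_common;
        struct tcp_sock *tp;

        if (!skc)
            return 0;

        /* NULL here means the entry is not a full TCP socket
         * (e.g. a timewait or request socket). */
        tp = bpf_skc_to_tcp_sock(skc);
        if (!tp)
            return 0;

        if (bpf_getsockopt(tp, SOL_TCP, TCP_CONGESTION,
                           cur_cc, sizeof(cur_cc)))
            return 0;
        if (__builtin_memcmp(cur_cc, dctcp_cc, sizeof(dctcp_cc)))
            bpf_setsockopt(tp, SOL_TCP, TCP_CONGESTION,
                           dctcp_cc, sizeof(dctcp_cc));
        return 0;
    }

    char _license[] SEC("license") = "GPL";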
|
| D | vrf_socket_lookup.c | in socket_lookup():
|      21  struct bpf_sock_tuple *tp;
|      36  tp = (struct bpf_sock_tuple *)&iph->saddr;
|      37  tplen = sizeof(tp->ipv4);
|      38  if ((void *)tp + tplen > data_end)
|      44  sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
|      46  sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
|      49  sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0);
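
Line 38 is the direct-packet-access idiom the program depends on: every pointer derived from packet data must be checked against data_end before it is dereferenced, or the verifier rejects the program (here, before reusing the IP addresses and ports in place as a bpf_sock_tuple). A minimal tc-classifier sketch of the idiom; the program name is illustrative.

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int bounds_check_demo(struct __sk_buff *ctx)
    {
        void *data_end = (void *)(long)ctx->data_end;
        void *data = (void *)(long)ctx->data;
        struct ethhdr *eth = data;
        struct iphdr *iph;

        /* Prove each header in-bounds before reading it. */
        if ((void *)(eth + 1) > data_end)
            return TC_ACT_OK;
        iph = (void *)(eth + 1);
        if ((void *)(iph + 1) > data_end)
            return TC_ACT_OK;

        if (iph->protocol != IPPROTO_TCP)
            return TC_ACT_OK;
        /* &iph->saddr could now serve as a bpf_sock_tuple, after one
         * more bounds check covering its ipv4 variant (line 38 above). */
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";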
|
| D | test_btf_skc_cls_ingress.c |
|      in test_syncookie_helper():
|      22  struct tcp_sock *tp,
|      41  mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
|      51  int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
|      in handle_ip6_tcp():
|     114  struct tcp_sock *tp;
|     116  tp = bpf_skc_to_tcp_sock(bpf_skc);
|     117  if (!tp) {
|     122  if (bpf_sk_assign(skb, tp, 0)) {
|     127  listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;
|     129  test_syncookie_helper(ip6h, th, tp, skb);
|     130  bpf_sk_release(tp);
|
| D | bpf_iter_tcp4.c |
|      74  static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
|      88  icsk = &tp->inet_conn;
|     118  rx_queue = tp->rcv_nxt - tp->copied_seq;
|     127  tp->write_seq - tp->snd_una, rx_queue,
|     135  tp,
|     139  tp->snd_cwnd,
|     141  : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
|      in dump_tcp4():
|     202  struct tcp_sock *tp;
|     220  tp = bpf_skc_to_tcp_sock(sk_common);
|     221  if (tp)
|     [all …]
|
| D | bpf_cubic.c |
|      31  extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
|      32  extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
|      in bictcp_hystart_reset():
|     171  struct tcp_sock *tp = tcp_sk(sk);
|     175  ca->end_seq = tp->snd_nxt;
|      in BPF_PROG():
|     388  struct tcp_sock *tp = tcp_sk(sk);
|     394  if (tcp_in_slow_start(tp)) {
|     397  acked = tcp_slow_start(tp, acked);
|     401  bictcp_update(ca, tp->snd_cwnd, acked);
|     402  tcp_cong_avoid_ai(tp, ca->cnt, acked);
|      in BPF_PROG():
|     408  const struct tcp_sock *tp = tcp_sk(sk);
|     [all …]
|
| D | bpf_iter_tcp6.c |
|      74  static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
|      88  icsk = &tp->tcp.inet_conn;
|     118  rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq;
|     131  tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,
|     139  tp,
|     143  tp->tcp.snd_cwnd,
|     145  : (tcp_in_initial_slowstart(&tp->tcp) ? -1
|     146  : tp->tcp.snd_ssthresh)
|      in dump_tcp6():
|     218  struct tcp6_sock *tp;
|     236  tp = bpf_skc_to_tcp6_sock(sk_common);
|     [all …]
|
| D | bpf_tracing_net.h |
|     158  static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
|     160  return tp->snd_cwnd < tp->snd_ssthresh;
|      in tcp_is_cwnd_limited():
|     165  const struct tcp_sock *tp = tcp_sk(sk);
|     168  if (tcp_in_slow_start(tp))
|     169  return tp->snd_cwnd < 2 * tp->max_packets_out;
|     171  return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);
|
| D | test_tcpbpf_kern.c | in get_tp_window_clamp():
|      20  struct tcp_sock *tp;
|      25  tp = bpf_skc_to_tcp_sock(sk);
|      26  if (!tp)
|      28  return tp->window_clamp;
|
| D | test_sock_fields.c |
|      in egress_read_sock_fields():
|     129  struct bpf_tcp_sock *tp, *tp_ret;
|     169  tp = bpf_tcp_sock(sk);
|     170  if (!tp)
|     174  tpcpy(tp_ret, tp);
|      in ingress_read_sock_fields():
|     223  struct bpf_tcp_sock *tp;
|     247  tp = bpf_tcp_sock(sk);
|     248  if (!tp)
|     252  tpcpy(&listen_tp, tp);
|
| /tools/testing/selftests/timens/ |
| D | gettime_perf.c | in test():
|      49  struct timespec tp, start;
|      54  tp = start;
|      55  for (tp = start; start.tv_sec + timeout > tp.tv_sec ||
|      56  (start.tv_sec + timeout == tp.tv_sec &&
|      57  start.tv_nsec > tp.tv_nsec); i++) {
|      58  vdso_clock_gettime(clockid, &tp);
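
The loop at lines 55-58 is a throughput measurement: it calls the vDSO clock_gettime() repeatedly until `timeout` seconds elapse, counting iterations. A self-contained sketch using libc's clock_gettime() rather than the selftest's directly resolved vDSO symbol:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        const time_t timeout = 3;   /* seconds to spin; illustrative */
        struct timespec tp, start;
        unsigned long long i = 0;

        clock_gettime(CLOCK_MONOTONIC, &start);
        /* Same termination condition as gettime_perf.c: run until
         * start + timeout has passed, counting calls in i. */
        for (tp = start; start.tv_sec + timeout > tp.tv_sec ||
                         (start.tv_sec + timeout == tp.tv_sec &&
                          start.tv_nsec > tp.tv_nsec); i++)
            clock_gettime(CLOCK_MONOTONIC, &tp);

        printf("%llu calls in %ld s (%.1f M calls/sec)\n",
               i, (long)timeout, i / (timeout * 1e6));
        return 0;
    }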
|
| /tools/testing/selftests/bpf/prog_tests/ |
| D | sock_fields.c |
|      83  static void print_tp(const struct bpf_tcp_sock *tp, const char *prefix)
|      92  tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
|      93  tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
|      94  tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
|      95  tp->packets_out, tp->retrans_out, tp->total_retrans,
|      96  tp->segs_in, tp->data_segs_in, tp->segs_out,
|      97  tp->data_segs_out, tp->lost_out, tp->sacked_out,
|      98  tp->bytes_received, tp->bytes_acked);
|
| /tools/testing/selftests/filesystems/fuse/ |
| D | fuse_daemon.c | in display_trace():
|      29  int tp = -1;
|      42  TEST(tp = s_open(s_path(tracing_folder(), s("trace_pipe")),
|      43  O_RDONLY | O_CLOEXEC), tp != -1);
|      45  TEST(bytes_read = read(tp, &c, sizeof(c)),
|      55  close(tp);
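
display_trace() reads ftrace's trace_pipe, which blocks until events arrive and consumes records as they are read. A minimal sketch without the selftest's TEST()/s_path() harness; the tracefs mount point below is an assumption (the selftest resolves it via tracing_folder()).

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Assumed tracefs mount; may also live under
         * /sys/kernel/debug/tracing on older systems. */
        int tp = open("/sys/kernel/tracing/trace_pipe",
                      O_RDONLY | O_CLOEXEC);
        char c;

        if (tp < 0)
            return 1;
        while (read(tp, &c, sizeof(c)) > 0)   /* blocks until data */
            fputc(c, stdout);
        close(tp);
        return 0;
    }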
|
| /tools/perf/python/ |
| D | tracepoint.py |
|      18  tp = tracepoint("sched", "sched_switch")
|      23  evlist.add(tp)
|
| /tools/testing/selftests/bpf/benchs/ |
| D | run_bench_trigger.sh |
|       8  rawtp tp \
|
| /tools/power/pm-graph/ |
| D | bootgraph.py |
|     303  tp = aslib.TestProps()
|     312  if re.match(tp.stampfmt, line):
|     313  tp.stamp = line
|     315  elif re.match(tp.sysinfofmt, line):
|     316  tp.sysinfo = line
|     318  elif re.match(tp.cmdlinefmt, line):
|     319  tp.cmdline = line
|     321  elif re.match(tp.kparamsfmt, line):
|     322  tp.kparams = line
|     368  if tp.stamp:
|     [all …]
|
| /tools/net/ynl/ |
| D | ethtool.py |
|      50  field, name, tp = spec
|      53  tp = 'int'
|      56  if tp == 'yn':
|      58  elif tp == 'bool' or isinstance(value, bool):
|
| /tools/include/uapi/linux/ |
| D | tcp.h |
|      70  #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
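
tcp_flag_word() loads the fourth 32-bit word of the TCP header (data offset, flags, and window) in a single access, so the TCP_FLAG_* big-endian masks defined in the same uapi header can be tested against it. A small usage sketch:

    #include <stdio.h>
    #include <linux/tcp.h>

    int main(void)
    {
        struct tcphdr th = { .syn = 1, .ack = 1 };

        /* Check individual flag bits through the word-sized view. */
        if (tcp_flag_word(&th) & TCP_FLAG_SYN)
            puts("SYN set");
        if (!(tcp_flag_word(&th) & TCP_FLAG_FIN))
            puts("FIN clear");
        return 0;
    }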
|