// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Anton Protopopov
//
// Based on tcpconnect(8) from BCC by Brendan Gregg
#include <vmlinux.h>

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

#include "maps.bpf.h"
#include "tcpconnect.h"

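/*
 * Tunable filters. These look like constants to the verifier; userspace
 * is expected to set them through the BPF skeleton before the object is
 * loaded, after which they are read-only.
 */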
SEC(".rodata") int filter_ports[MAX_PORTS];
const volatile int filter_ports_len = 0;
const volatile uid_t filter_uid = -1;
const volatile pid_t filter_pid = 0;
const volatile bool do_count = 0;

/* Define here, because there are conflicts with include files */
#define AF_INET		2
#define AF_INET6	10

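/*
 * Maps a thread id to the socket passed to tcp_v{4,6}_connect(), so that
 * the kretprobe can find the socket the matching kprobe saw on entry.
 */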
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct sock *);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sockets SEC(".maps");

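/*
 * Per-flow connection counters, used instead of per-event output when
 * running in counting mode (do_count).
 */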
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct ipv4_flow_key);
	__type(value, u64);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} ipv4_count SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct ipv6_flow_key);
	__type(value, u64);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} ipv6_count SEC(".maps");

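/* Perf buffer for streaming one event per traced connect() to userspace. */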
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

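/*
 * Return true if this destination port should be skipped, i.e. it is not
 * in the filter_ports allow-list. An empty list allows every port.
 */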
static __always_inline bool filter_port(__u16 port)
{
	int i;

	if (filter_ports_len == 0)
		return false;

	for (i = 0; i < filter_ports_len; i++) {
		if (port == filter_ports[i])
			return false;
	}
	return true;
}

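/*
 * Common entry handler for both kprobes: apply the PID and UID filters,
 * then stash the socket pointer keyed by thread id for the return probe.
 */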
static __always_inline int
enter_tcp_connect(struct pt_regs *ctx, struct sock *sk)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	__u32 uid;

	if (filter_pid && pid != filter_pid)
		return 0;

	uid = bpf_get_current_uid_gid();
	if (filter_uid != (uid_t) -1 && uid != filter_uid)
		return 0;

	bpf_map_update_elem(&sockets, &tid, &sk, 0);
	return 0;
}

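/*
 * Bump the IPv4 flow counter, creating the map entry on first use via
 * bpf_map_lookup_or_try_init() from maps.bpf.h.
 */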
static __always_inline void count_v4(struct sock *sk, __u16 dport)
{
	struct ipv4_flow_key key = {};
	static __u64 zero;
	__u64 *val;

	BPF_CORE_READ_INTO(&key.saddr, sk, __sk_common.skc_rcv_saddr);
	BPF_CORE_READ_INTO(&key.daddr, sk, __sk_common.skc_daddr);
	key.dport = dport;
	val = bpf_map_lookup_or_try_init(&ipv4_count, &key, &zero);
	if (val)
		__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
}

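/* Same as count_v4(), but keyed by the IPv6 source and destination. */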
static __always_inline void count_v6(struct sock *sk, __u16 dport)
{
	struct ipv6_flow_key key = {};
	static const __u64 zero;
	__u64 *val;

	BPF_CORE_READ_INTO(&key.saddr, sk,
			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	BPF_CORE_READ_INTO(&key.daddr, sk,
			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
	key.dport = dport;

	val = bpf_map_lookup_or_try_init(&ipv6_count, &key, &zero);
	if (val)
		__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
}

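/* Emit a single IPv4 connect event through the perf buffer. */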
static __always_inline void
trace_v4(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 dport)
{
	struct event event = {};

	event.af = AF_INET;
	event.pid = pid;
	event.uid = bpf_get_current_uid_gid();
	event.ts_us = bpf_ktime_get_ns() / 1000;
	BPF_CORE_READ_INTO(&event.saddr_v4, sk, __sk_common.skc_rcv_saddr);
	BPF_CORE_READ_INTO(&event.daddr_v4, sk, __sk_common.skc_daddr);
	event.dport = dport;
	bpf_get_current_comm(event.task, sizeof(event.task));

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));
}

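/* Emit a single IPv6 connect event through the perf buffer. */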
static __always_inline void
trace_v6(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 dport)
{
	struct event event = {};

	event.af = AF_INET6;
	event.pid = pid;
	event.uid = bpf_get_current_uid_gid();
	event.ts_us = bpf_ktime_get_ns() / 1000;
	BPF_CORE_READ_INTO(&event.saddr_v6, sk,
			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	BPF_CORE_READ_INTO(&event.daddr_v6, sk,
			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
	event.dport = dport;
	bpf_get_current_comm(event.task, sizeof(event.task));

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));
}

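/*
 * Common return handler: look up the socket stashed on entry, skip it if
 * connect() failed or the destination port is filtered, then either count
 * the flow or emit an event. The map entry is deleted in all cases.
 */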
static __always_inline int
exit_tcp_connect(struct pt_regs *ctx, int ret, int ip_ver)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	struct sock **skpp;
	struct sock *sk;
	__u16 dport;

	skpp = bpf_map_lookup_elem(&sockets, &tid);
	if (!skpp)
		return 0;

	if (ret)
		goto end;

	sk = *skpp;

	BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport);
	if (filter_port(dport))
		goto end;

	if (do_count) {
		if (ip_ver == 4)
			count_v4(sk, dport);
		else
			count_v6(sk, dport);
	} else {
		if (ip_ver == 4)
			trace_v4(ctx, pid, sk, dport);
		else
			trace_v6(ctx, pid, sk, dport);
	}

end:
	bpf_map_delete_elem(&sockets, &tid);
	return 0;
}

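/*
 * tcp_v4_connect() and tcp_v6_connect() both take the socket as their
 * first argument and return 0 on success, so one pair of entry/exit
 * handlers serves both address families.
 */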
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
	return enter_tcp_connect(ctx, sk);
}

SEC("kretprobe/tcp_v4_connect")
int BPF_KRETPROBE(tcp_v4_connect_ret, int ret)
{
	return exit_tcp_connect(ctx, ret, 4);
}

SEC("kprobe/tcp_v6_connect")
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
{
	return enter_tcp_connect(ctx, sk);
}

SEC("kretprobe/tcp_v6_connect")
int BPF_KRETPROBE(tcp_v6_connect_ret, int ret)
{
	return exit_tcp_connect(ctx, ret, 6);
}

char LICENSE[] SEC("license") = "GPL";