/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on the Android T Beta 3 bpfloader
#define BPFLOADER_MIN_VER BPFLOADER_T_BETA3_VERSION

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "bpf_shared.h"

// This is defined for cgroup bpf filter only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0

// This is used for xt_bpf program only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "", false)

// For maps netd only needs read only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "", false)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0660)

// Bpf map arrays on creation are preinitialized to 0 and do not support deletion of a key,
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf)
// Additionally on newer kernels the bpf jit can optimize out the lookups.
// Only valid indexes are [0..CONFIGURATION_MAP_SIZE-1].
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)

DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

// iptables xt_bpf programs need to be usable by both netd and netutils_wrappers
#define DEFINE_XTBPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)

// programs that need to be usable by netd, but not by netutils_wrappers
#define DEFINE_NETD_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, \
                        KVER_NONE, KVER_INF, false, "fs_bpf_netd_readonly", "")

// programs that only need to be usable by the system server
#define DEFINE_SYS_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, \
                        KVER_NONE, KVER_INF, false, "fs_bpf_net_shared", "")

static __always_inline int is_system_uid(uint32_t uid) {
    // MIN_SYSTEM_UID is AID_ROOT == 0, so uint32_t is *always* >= 0
    // MAX_SYSTEM_UID is AID_NOBODY == 9999, while AID_APP_START == 10000
    return (uid < AID_APP_START);
}

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled,
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (i.e. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using the TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (i.e. counted them as single packets with no extra overhead).
 *
 * Especially since the number of packets is important for any future clat offload correction
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion).
 */
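// For example, a single 4500 byte IPv4 GSO skb works out to tcp_overhead = 20 (IP) + 20 (TCP)
// + 12 (timestamp option) = 52, mss = 1500 - 52 = 1448, payload = 4500 - 52 = 4448,
// packets = ceil(4448 / 1448) = 4, and an adjusted byte count of 52 * 4 + 4448 = 4656.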
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey) \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb, \
                                                               int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key); \
        if (!value) { \
            StatsValue newValue = {}; \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST); \
            value = bpf_##the_stats_map##_lookup_elem(key); \
        } \
        if (value) { \
            const int mtu = 1500; \
            uint64_t packets = 1; \
            uint64_t bytes = skb->len; \
            if (bytes > mtu) { \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6)); \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr)); \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12; \
                int mss = mtu - tcp_overhead; \
                uint64_t payload = bytes - tcp_overhead; \
                packets = (payload + mss - 1) / mss; \
                bytes = tcp_overhead * packets + payload; \
            } \
            if (direction == BPF_EGRESS) { \
                __sync_fetch_and_add(&value->txPackets, packets); \
                __sync_fetch_and_add(&value->txBytes, bytes); \
            } else if (direction == BPF_INGRESS) { \
                __sync_fetch_and_add(&value->rxPackets, packets); \
                __sync_fetch_and_add(&value->rxBytes, bytes); \
            } \
        } \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
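                // The low nibble of the first IPv4 header byte is the IHL in 32-bit words,
                // so the TCP flags byte lives at ihl * 4 + TCP_FLAG_OFF.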
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

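    // Chains tested with !(uidRules & X) below are allowlists: the packet is dropped unless the
    // uid carries the match bit. Chains tested with (uidRules & X) are denylists: the packet is
    // dropped when the uid carries the bit.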
    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & RESTRICTED_MATCH) && !(uidRules & RESTRICTED_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & LOW_POWER_STANDBY_MATCH) && !(uidRules & LOW_POWER_STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & OEM_DENY_1_MATCH) && (uidRules & OEM_DENY_1_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & OEM_DENY_2_MATCH) && (uidRules & OEM_DENY_2_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & OEM_DENY_3_MATCH) && (uidRules & OEM_DENY_3_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && skb->ifindex != 1) {
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drop packets that come neither from lo nor from the allowed interface.
                // An allowed interface of 0 is a wildcard and does not drop packets.
                return BPF_DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drop packets not coming from lo when the rule has LOCKDOWN_VPN_MATCH
            // but not IIF_MATCH.
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}

static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                             StatsKey* key, uint32_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    if (sock_uid == AID_CLAT || uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    // Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
    // Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
    // and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    // Use asm("%0 &= 1" : "+r"(match)) before return match,
    // to help kernel's bpf verifier, so that it can be 100% certain
    // that the returned value is always BPF_NOMATCH(0) or BPF_MATCH(1).
    if (!selectedMap) {
        asm("%0 &= 1" : "+r"(match));
        return match;
    }

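    // Tagged traffic is counted twice below: once against the specific (uid, tag) key and once
    // against the plain (uid, 0) key, so the per-uid entry already includes the tagged bytes.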
    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    asm("%0 &= 1" : "+r"(match));
    return match;
}

DEFINE_NETD_BPF_PROG("cgroupskb/ingress/stats", AID_ROOT, AID_SYSTEM, bpf_cgroup_ingress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

DEFINE_NETD_BPF_PROG("cgroupskb/egress/stats", AID_ROOT, AID_SYSTEM, bpf_cgroup_egress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all its traffic is already accounted for
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not counted here, by virtue of the iptables raw PREROUTING drop rule
    // (in the clat_raw_PREROUTING chain), which triggers before this one (in bw_raw_PREROUTING).
    // It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_SYS_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN,
                    tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, BPF_INGRESS, &key);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid. Getting it back usually means that skb->sk is NULL
    // during RX (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // Additionally bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_PROG_EXT("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                    KVER(4, 14, 0), KVER_INF, false, "fs_bpf_netd_readonly", "")
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to the app for all
     * users at install time, so we only check the appId part of the requesting uid
     * at run time. See UserHandle#isSameApp for detail.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
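    // For example, uid 1010123 (user 10) and uid 10123 (user 0) both map to appId 10123.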
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

LICENSE("Apache 2.0");
CRITICAL("netd");