/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on Android T+
#define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION

#include "bpf_net_helpers.h"
#include "netd.h"

// Return values for the cgroup skb filter programs only: the kernel treats 0 as drop
// and 1 as pass.
static const int DROP = 0;
static const int PASS = 1;
static const int DROP_UNLESS_DNS = 2;  // internal to our program

// offsetof(struct iphdr, ihl) -- but that's a bitfield
#define IPPROTO_IHL_OFF 0

// This is offsetof(struct tcphdr, "32 bit tcp flag field")
// The tcp flags are after be16 source, dest & be32 seq, ack_seq, hence 12 bytes in.
//
// Note that TCP_FLAG_{ACK,PSH,RST,SYN,FIN} are htonl(0x00{10,08,04,02,01}0000)
// see include/uapi/linux/tcp.h
#define TCP_FLAG32_OFF 12
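
// Byte 12 of the TCP header is the data offset / reserved bits; byte 13 holds the
// CWR/ECE/URG/ACK/PSH/RST/SYN/FIN bits, so a single-byte load at TCP_FLAG8_OFF below
// reads all eight one-bit flags at once.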
#define TCP_FLAG8_OFF (TCP_FLAG32_OFF + 1)

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,         \
                       AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "",   \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,              \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)

// For maps netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries)  \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,          \
                       AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "", \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,               \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0660)
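
// Aside from the pin directory / selinux label, the three variants above differ in the
// pinned file's mode bits: 0060 is group read/write only, 0460 is owner read plus group
// read/write, and 0660 is owner and group read/write.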

// BPF array maps are preinitialized to 0 on creation and do not support deletion of a key,
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf)
// Additionally on newer kernels the bpf jit can optimize out the lookups.
// The only valid indexes are [0..CONFIGURATION_MAP_SIZE-1].
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)

// TODO: consider whether we can merge some of these maps
// for example it might be possible to merge 2 or 3 of:
//   uid_counterset_map + uid_owner_map + uid_permission_map
DEFINE_BPF_MAP_NO_NETD(blocked_ports_map, ARRAY, int, uint64_t,
                       1024 /* 64K ports -> 1024 u64s */)
DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(ingress_discard_map, HASH, IngressDiscardKey, IngressDiscardValue,
                       INGRESS_DISCARD_MAP_SIZE)

DEFINE_BPF_MAP_RW_NETD(lock_array_test_map, ARRAY, uint32_t, bool, 1)
DEFINE_BPF_MAP_RW_NETD(lock_hash_test_map, HASH, uint32_t, bool, 1)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

// A single-element configuration array; packet tracing is enabled when 'true'.
DEFINE_BPF_MAP_EXT(packet_trace_enabled_map, ARRAY, uint32_t, bool, 1,
                   AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                   BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                   LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)

// A ring buffer on which packet information is pushed.
DEFINE_BPF_RINGBUF_EXT(packet_trace_ringbuf, PacketTrace, PACKET_TRACE_BUF_SIZE,
                       AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                       BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                       LOAD_ON_USER, LOAD_ON_USERDEBUG);
DEFINE_BPF_MAP_RO_NETD(data_saver_enabled_map, ARRAY, uint32_t, bool,
                       DATA_SAVER_ENABLED_MAP_SIZE)

DEFINE_BPF_MAP_EXT(local_net_access_map, LPM_TRIE, LocalNetAccessKey, bool, 1000,
                   AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "", PRIVATE,
                   BPFLOADER_MAINLINE_25Q2_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG, LOAD_ON_USER,
                   LOAD_ON_USERDEBUG, 0)

// not preallocated
DEFINE_BPF_MAP_EXT(local_net_blocked_uid_map, HASH, uint32_t, bool, -1000,
                   AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "", PRIVATE,
                   BPFLOADER_MAINLINE_25Q2_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG, LOAD_ON_USER,
                   LOAD_ON_USERDEBUG, 0)

// iptables xt_bpf programs need to be usable by both the netd and netutils_wrappers
// selinux contexts.  This is because even non-xt_bpf iptables mutations are implemented
// as a full table dump, followed by an update in userspace, and then a reload into the
// kernel.  During that reload, any already in-use xt_bpf matchers are serialized as the
// path to the pinned program (see XT_BPF_MODE_PATH_PINNED), and the iptables binary
// (or rather the kernel acting on behalf of it) must be able to retrieve the pinned
// program for the reload to succeed.
#define DEFINE_XTBPF_PROG(SECTION_NAME, the_prog) \
    DEFINE_BPF_PROG(SECTION_NAME, AID_ROOT, AID_NET_ADMIN, the_prog)

// programs that need to be usable by netd, but not by netutils_wrappers
// (this is because these are currently attached by the mainline-provided libnetd_updatable .so
// which is loaded into netd and thus runs as the netd uid/gid/selinux context)
#define DEFINE_NETD_BPF_PROG_RANGES(SECTION_NAME, the_prog, minKV, maxKV, min_loader, max_loader) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, AID_ROOT, AID_ROOT, the_prog,                               \
                        minKV, maxKV, min_loader, max_loader, MANDATORY,                          \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, the_prog, minKV, maxKV) \
    DEFINE_NETD_BPF_PROG_RANGES(SECTION_NAME, the_prog, minKV, maxKV, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER)

#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, the_prog, min_kv) \
    DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, the_prog, min_kv, KVER_INF)

#define DEFINE_NETD_BPF_PROG(SECTION_NAME, the_prog) \
    DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, the_prog, KVER_NONE)
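
// Note: the unqualified DEFINE_NETD_BPF_PROG above therefore expands to a MANDATORY
// program with no kernel version restriction (KVER_NONE..KVER_INF), accepted by the
// full BPFLOADER_MIN_VER..BPFLOADER_MAX_VER loader range.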

#define DEFINE_NETD_V_BPF_PROG_KVER(SECTION_NAME, the_prog, minKV)                                \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, AID_ROOT, AID_ROOT, the_prog, minKV,                        \
                        KVER_INF, BPFLOADER_MAINLINE_V_VERSION, BPFLOADER_MAX_VER, MANDATORY,     \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// programs that only need to be usable by the system server
#define DEFINE_SYS_BPF_PROG(SECTION_NAME, the_prog) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, AID_ROOT, AID_NET_ADMIN, the_prog, KVER_NONE, KVER_INF,  \
                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
                        "fs_bpf_net_shared", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance" which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead).
 *
 * Especially since the number of packets is important for any future clat offload correction
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion).
 */
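
// Worked example of the GSO adjustment below, for a hypothetical IPv4 GSO skb of
// 64000 bytes: tcp_overhead = 20 (iphdr) + 20 (tcphdr) + 12 (timestamp option) = 52,
// mss = 1500 - 52 = 1448, payload = 64000 - 52 = 63948,
// packets = ceil(63948 / 1448) = 45, bytes = 52 * 45 + 63948 = 66288,
// i.e. this one GSO frame is accounted as 45 wire packets and 66288 wire bytes.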
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                            \
    static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                              const TypeOfKey* const key,        \
                                                              const struct egress_bool egress,   \
                                                     __unused const struct kver_uint kver) {     \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                              \
        if (!value) {                                                                            \
            StatsValue newValue = {};                                                            \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                      \
            value = bpf_##the_stats_map##_lookup_elem(key);                                      \
        }                                                                                        \
        if (value) {                                                                             \
            const int mtu = 1500;                                                                \
            uint64_t packets = 1;                                                                \
            uint64_t bytes = skb->len;                                                           \
            if (bytes > mtu) {                                                                   \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                             \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));     \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                     \
                int mss = mtu - tcp_overhead;                                                    \
                uint64_t payload = bytes - tcp_overhead;                                         \
                packets = (payload + mss - 1) / mss;                                             \
                bytes = tcp_overhead * packets + payload;                                        \
            }                                                                                    \
            if (egress.egress) {                                                                 \
                __sync_fetch_and_add(&value->txPackets, packets);                                \
                __sync_fetch_and_add(&value->txBytes, bytes);                                    \
            } else {                                                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                                \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                    \
            }                                                                                    \
        }                                                                                        \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)
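
// Each instantiation above generates an update_<map>() helper, e.g.
// update_stats_map_A(skb, &key, egress, kver), which the accounting code below calls.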

// both of these return 0 on success or -EFAULT on failure (and zero out the buffer)
static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* const skb,
                                                         const int L3_off,
                                                         void* const to,
                                                         const int len,
                                                         const struct kver_uint kver) {
    // 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
    // ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
    //
    // 4.19+ kernels support the 'bpf_skb_load_bytes_relative()' bpf helper function,
    // so we can use it.  On pre-4.19 kernels we cannot use the relative load helper,
    // and thus will simply get things wrong if there's any L2 (ethernet) header in the skb.
    //
    // Luckily, for cellular traffic, there likely isn't any, as cell is usually 'rawip'.
    //
    // However, this does mean that wifi (and ethernet) on 4.14 is basically a lost cause:
    // we'll be making decisions based on the *wrong* bytes (fetched from the wrong offset),
    // because the 'L3_off' passed to bpf_skb_load_bytes() should be increased by l2_header_size,
    // which for ethernet is 14 and not 0 like it is for rawip.
    //
    // For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
    // since those extend the ethernet header from 14 to 18 bytes.
    return KVER_IS_AT_LEAST(kver, 4, 19, 0)
        ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
        : bpf_skb_load_bytes(skb, L3_off, to, len);
}
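
// Typical usage, as seen throughout this file: a one-byte load of the IPv4 protocol
// field, at an offset relative to the start of the L3 (IP) header:
//   (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);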

// Returns false iff the arguments are found via longest-prefix-match lookup and the
// matched entry marks them as disallowed; anything without a matching entry is allowed.
static inline __always_inline bool is_local_net_access_allowed(const uint32_t if_index,
        const struct in6_addr* remote_ip6, const uint16_t protocol, const __be16 remote_port) {
    LocalNetAccessKey query_key = {
        .lpm_bitlen = 8 * (sizeof(if_index) + sizeof(*remote_ip6) + sizeof(protocol)
            + sizeof(remote_port)),
        .if_index = if_index,
        .remote_ip6 = *remote_ip6,
        .protocol = protocol,
        .remote_port = remote_port
    };
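    // The query always uses the full key length; userspace can install entries with
    // shorter prefix lengths (e.g. covering only if_index, or if_index + remote_ip6)
    // to express coarser allow/block rules that still match via longest prefix match.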
    bool* v = bpf_local_net_access_map_lookup_elem(&query_key);
    return v ? *v : true;
}

static __always_inline inline bool should_block_local_network_packets(struct __sk_buff *skb,
                                   const uint32_t uid, const struct egress_bool egress,
                                   const struct kver_uint kver) {
    if (is_system_uid(uid)) return false;

    bool* block_local_net = bpf_local_net_blocked_uid_map_lookup_elem(&uid);
    if (!block_local_net) return false; // uid not found in map
    if (!*block_local_net) return false; // lookup returned 'bool false'

    struct in6_addr remote_ip6;
    uint8_t ip_proto;
    uint8_t L4_off;
    if (skb->protocol == htons(ETH_P_IP)) {
        int remote_ip_ofs = egress.egress ? IP4_OFFSET(daddr) : IP4_OFFSET(saddr);
        remote_ip6.s6_addr32[0] = 0;
        remote_ip6.s6_addr32[1] = 0;
        remote_ip6.s6_addr32[2] = htonl(0xFFFF);
        (void)bpf_skb_load_bytes_net(skb, remote_ip_ofs, &remote_ip6.s6_addr32[3], 4, kver);
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &ip_proto, sizeof(ip_proto), kver);
        uint8_t ihl;
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver);
        L4_off = (ihl & 0x0F) * 4;  // IHL is the IPv4 header length in 32-bit words.
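        // e.g. the common version/IHL byte 0x45 (IPv4, 5 words) gives L4_off = 20, the
        // minimum IPv4 header length; IP options can push this as high as 60 bytes.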
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        int remote_ip_ofs = egress.egress ? IP6_OFFSET(daddr) : IP6_OFFSET(saddr);
        (void)bpf_skb_load_bytes_net(skb, remote_ip_ofs, &remote_ip6, sizeof(remote_ip6), kver);
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &ip_proto, sizeof(ip_proto), kver);
        L4_off = sizeof(struct ipv6hdr);
    } else {
        return false;
    }

    __be16 remote_port = 0;
    switch (ip_proto) {
      case IPPROTO_TCP:
      case IPPROTO_DCCP:
      case IPPROTO_UDP:
      case IPPROTO_UDPLITE:
      case IPPROTO_SCTP:
        // all of these L4 protocols start with be16 source & dest ports; on egress the
        // remote port is the dest port (offset 2), on ingress the source port (offset 0)
        (void)bpf_skb_load_bytes_net(skb, L4_off + (egress.egress ? 2 : 0), &remote_port,
                                     sizeof(remote_port), kver);
        break;
    }

    return !is_local_net_access_allowed(skb->ifindex, &remote_ip6, ip_proto, remote_port);
}

static __always_inline inline void do_packet_tracing(
        const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid,
        const uint32_t tag, const struct kver_uint kver) {
    if (!KVER_IS_AT_LEAST(kver, 5, 10, 0)) return;

    uint32_t mapKey = 0;
    bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
    if (traceConfig == NULL) return;
    if (*traceConfig == false) return;

    PacketTrace* pkt = bpf_packet_trace_ringbuf_reserve();
    if (pkt == NULL) return;

    // Errors from bpf_skb_load_bytes_net are ignored to favor returning something
    // over returning nothing. In the event of an error, the kernel will fill in
    // zero for the destination memory. Do not change the default '= 0' below.

    uint8_t proto = 0;
    uint8_t L4_off = 0;
    uint8_t ipVersion = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &L4_off, sizeof(L4_off), kver);
        L4_off = (L4_off & 0x0F) * 4;  // IHL is the IPv4 header length in 32-bit words.
        ipVersion = 4;
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        L4_off = sizeof(struct ipv6hdr);
        ipVersion = 6;
        // skip over a *single* HOPOPTS or DSTOPTS extension header (if present)
        if (proto == IPPROTO_HOPOPTS || proto == IPPROTO_DSTOPTS) {
            struct {
                uint8_t proto, len;
            } ext_hdr;
            if (!bpf_skb_load_bytes_net(skb, L4_off, &ext_hdr, sizeof(ext_hdr), kver)) {
                proto = ext_hdr.proto;
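                // ext_hdr.len counts 8-byte units beyond the mandatory first 8 bytes,
                // so len == 0 means the extension header is exactly 8 bytes long.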
                L4_off += (ext_hdr.len + 1) * 8;
            }
        }
    }

    uint8_t flags = 0;
    __be16 sport = 0, dport = 0;
    // L4_off remains 0 for non-IP packets or failed loads; 20 bytes is the minimum
    // IPv4 header length, so anything smaller cannot precede a real L4 header.
    if (L4_off >= 20) {
      switch (proto) {
        case IPPROTO_TCP:
          (void)bpf_skb_load_bytes_net(skb, L4_off + TCP_FLAG8_OFF, &flags, sizeof(flags), kver);
          // fallthrough
        case IPPROTO_DCCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_SCTP:
          // all of these L4 protocols start with be16 src & dst port
          (void)bpf_skb_load_bytes_net(skb, L4_off + 0, &sport, sizeof(sport), kver);
          (void)bpf_skb_load_bytes_net(skb, L4_off + 2, &dport, sizeof(dport), kver);
          break;
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
          // Both IPv4 and IPv6 icmp start with u8 type & code, which we store in the bottom
          // (ie. second) byte of sport/dport (which are be16s), the top byte is already zero.
          (void)bpf_skb_load_bytes_net(skb, L4_off + 0, (char *)&sport + 1, 1, kver); // type
          (void)bpf_skb_load_bytes_net(skb, L4_off + 1, (char *)&dport + 1, 1, kver); // code
          break;
      }
    }

    pkt->timestampNs = bpf_ktime_get_boot_ns();
    pkt->ifindex = skb->ifindex;
    pkt->length = skb->len;

    pkt->uid = uid;
    pkt->tag = tag;
    pkt->sport = sport;
    pkt->dport = dport;

    pkt->egress = egress.egress;
    pkt->wakeup = !egress.egress && (skb->mark & 0x80000000);  // Fwmark.ingress_cpu_wakeup
    pkt->ipProto = proto;
    pkt->tcpFlags = flags;
    pkt->ipVersion = ipVersion;

    bpf_packet_trace_ringbuf_submit(pkt);
}

static __always_inline inline bool skip_owner_match(struct __sk_buff* skb,
                                                    const struct egress_bool egress,
                                                    const struct kver_uint kver) {
    uint32_t flag = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        uint8_t ihl;
        // We don't check for success: this read cannot fail, since it is earlier in the
        // packet than proto, the read of which must have succeeded.  Additionally, even if
        // ihl were zeroed, the next read (of the tcp flags field, a little deeper in the
        // packet) would also fail, and that failure we already handle correctly.
        // (We also don't check that ihl is in [0x45,0x4F] nor that the ipv4 header
        // checksum is correct.)
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver);
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, (ihl & 0xF) * 4 + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, sizeof(struct ipv6hdr) + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else {
        return false;
    }
    // Always allow RSTs, and additionally allow ingress FINs.
    return flag & (TCP_FLAG_RST | (egress.egress ? 0 : TCP_FLAG_FIN));  // false on read failure
}

static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
                                                          const struct kver_uint kver) {
    // Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative(), which
    // provides reads relative to the L3 header.  Without that we could fetch the wrong bytes.
    // Additionally, earlier bpf verifiers are much harder to please.
    if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;

    IngressDiscardKey k = {};
    if (skb->protocol == htons(ETH_P_IP)) {
        k.daddr.s6_addr32[2] = htonl(0xFFFF);
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(daddr), &k.daddr.s6_addr32[3], 4, kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(daddr), &k.daddr, sizeof(k.daddr), kver);
    } else {
        return false;  // non IPv4/IPv6, so no IP to match on
    }

    // We didn't check for load success, because destination bytes will be zeroed if
    // bpf_skb_load_bytes_net() fails; instead we rely on daddr of '::' and '::ffff:0.0.0.0'
    // never being present in the map itself.

    IngressDiscardValue* v = bpf_ingress_discard_map_lookup_elem(&k);
    if (!v) return false;  // lookup failure -> no protection in place -> allow
    // if (skb->ifindex == 1) return false;  // allow 'lo', but can't happen - see callsite
    if (skb->ifindex == v->iif[0]) return false;  // allowed interface
    if (skb->ifindex == v->iif[1]) return false;  // allowed interface
    return true;  // disallowed interface
}

static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
                                                  const struct egress_bool egress,
                                                  const struct kver_uint kver,
                                                  const struct sdk_level_uint lvl) {
    if (is_system_uid(uid)) return PASS;

    if (skip_owner_match(skb, egress, kver)) return PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    // BACKGROUND match does not apply to loopback traffic
    if (skb->ifindex == 1) enabledRules &= ~BACKGROUND_MATCH;

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (isBlockedByUidRules(enabledRules, uidRules)) return DROP;

    if (!egress.egress && skb->ifindex != 1) {
        if (ingress_should_discard(skb, kver)) return DROP;
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drop packets coming from neither lo nor the allowed interface.
                // allowed_iif == 0 is a wildcard and does not drop packets.
                return DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drop packets not coming from lo when the rule has LOCKDOWN_VPN_MATCH
            // but not IIF_MATCH.
            return DROP_UNLESS_DNS;
        }
    }

    if (SDK_LEVEL_IS_AT_LEAST(lvl, 25Q2) && skb->ifindex == 1) {
        // TODO: sdksandbox localhost restrictions
    }

    return PASS;
}

static __always_inline inline void update_stats_with_config(const uint32_t selectedMap,
                                                            const struct __sk_buff* const skb,
                                                            const StatsKey* const key,
                                                            const struct egress_bool egress,
                                                            const struct kver_uint kver) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, key, egress, kver);
    } else {
        update_stats_map_B(skb, key, egress, kver);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb,
                                                      const struct egress_bool egress,
                                                      const struct kver_uint kver,
                                                      const struct sdk_level_uint lvl) {
    // sock_uid will be 'overflowuid' if !sk_fullsock(sk_to_full_sk(skb->sk))
    uint32_t sock_uid = bpf_get_socket_uid(skb);

    // The kernel's DEFAULT_OVERFLOWUID is 65534, the overflow 'nobody' uid.
    // Usually this being returned means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which is_system_uid() considers system.
    if (sock_uid == 65534) sock_uid = 0;

    uint64_t cookie = bpf_get_socket_cookie(skb);  // 0 iff !skb->sk
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // CLAT IPv6 TX sockets are *always* tagged with the CLAT uid, see tagSocketAsClat().
    // The CLAT daemon receives via an untagged AF_PACKET socket.
    if (egress.egress && uid == AID_CLAT) return PASS;

    int match = bpf_owner_match(skb, sock_uid, egress, kver, lvl);

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == DROP_UNLESS_DNS) match = PASS;
    } else {
        if (match == DROP_UNLESS_DNS) match = DROP;
    }

    if (SDK_LEVEL_IS_AT_LEAST(lvl, 25Q2) && (match != DROP)) {
        if (should_block_local_network_packets(skb, uid, egress, kver)) match = DROP;
    }

    // If an outbound packet is going to be dropped, we do not count that traffic.
    if (egress.egress && (match == DROP)) return DROP;

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    if (!selectedMap) return PASS;  // cannot happen, needed to keep bpf verifier happy

    do_packet_tracing(skb, egress, uid, tag, kver);
    update_stats_with_config(*selectedMap, skb, &key, egress, kver);
    update_app_uid_stats_map(skb, &uid, egress, kver);

    // We've already handled DROP_UNLESS_DNS up above, thus when we reach here the only
    // possible values of match are DROP(0) or PASS(1), however we need to use
    // "match &= 1" before 'return match' to help the kernel's bpf verifier,
    // so that it can be 100% certain that the returned value is always 0 or 1.
    // We use assembly so that it cannot be optimized out by a too smart compiler.
    asm("%0 &= 1" : "+r"(match));
    return match;
}

// -----

// Supported kernel + platform/os version combinations:
//
//      | 4.9 | 4.14 | 4.19 | 5.4 | 5.10 | 5.15 | 6.1 | 6.6 | 6.12 |
// 25Q2 |     |      |      |  x  |  x   |  x   |  x  |  x  |  x   |
//    V |     |      |  x   |  x  |  x   |  x   |  x  |  x  |      | (netbpfload)
//    U |     |  x   |  x   |  x  |  x   |  x   |  x  |     |      |
//    T |  x  |  x   |  x   |  x  |  x   |  x   |     |     |      | (magic netbpfload)
//    S |  x  |  x   |  x   |  x  |  x   |      |     |     |      | (dns netbpfload for offload)
//    R |  x  |  x   |  x   |  x  |      |      |     |     |      | (no mainline ebpf)
//
// Not relevant for eBPF, but R can also run on 4.4

// ----- cgroupskb/ingress/stats -----

// Android 25Q2+ 5.10+ (localnet protection + tracing)
DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/ingress/stats$5_10_25q2",
                            bpf_cgroup_ingress_5_10_25q2, KVER_5_10, KVER_INF,
                            BPFLOADER_MAINLINE_25Q2_VERSION, BPFLOADER_MAX_VER)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, KVER_5_10, SDK_LEVEL_25Q2);
}

// Android 25Q2+ 5.4 (localnet protection)
DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/ingress/stats$5_4_25q2",
                            bpf_cgroup_ingress_5_4_25q2, KVER_5_4, KVER_5_10,
                            BPFLOADER_MAINLINE_25Q2_VERSION, BPFLOADER_MAX_VER)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, KVER_5_4, SDK_LEVEL_25Q2);
}

// Android U/V 5.10+ (tracing)
DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/ingress/stats$5_10_u",
                            bpf_cgroup_ingress_5_10_u, KVER_5_10, KVER_INF,
                            BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAINLINE_25Q2_VERSION)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, KVER_5_10, SDK_LEVEL_U);
}

// Android T/U/V 4.19 & T/U/V/25Q2 5.4 & T 5.10/5.15
DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19",
                                bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, KVER_4_19, SDK_LEVEL_T);
}

// Android T 4.9 & T/U 4.14
DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_9",
                                bpf_cgroup_ingress_4_9, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, KVER_NONE, SDK_LEVEL_T);
}

// ----- cgroupskb/egress/stats -----

// Android 25Q2+ 5.10+ (localnet protection + tracing)
DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/egress/stats$5_10_25q2",
                            bpf_cgroup_egress_5_10_25q2, KVER_5_10, KVER_INF,
                            BPFLOADER_MAINLINE_25Q2_VERSION, BPFLOADER_MAX_VER)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, KVER_5_10, SDK_LEVEL_25Q2);
}

// Android 25Q2+ 5.4 (localnet protection)
DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/egress/stats$5_4_25q2",
                            bpf_cgroup_egress_5_4_25q2, KVER_5_4, KVER_5_10,
                            BPFLOADER_MAINLINE_25Q2_VERSION, BPFLOADER_MAX_VER)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, KVER_5_4, SDK_LEVEL_25Q2);
}

// Android U/V 5.10+ (tracing)
DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/egress/stats$5_10_u",
                            bpf_cgroup_egress_5_10_u, KVER_5_10, KVER_INF,
                            BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAINLINE_25Q2_VERSION)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, KVER_5_10, SDK_LEVEL_U);
}

// Android T/U/V 4.19 & T/U/V/25Q2 5.4 & T 5.10/5.15
DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19",
                                bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, KVER_4_19, SDK_LEVEL_T);
}

// Android T 4.9 & T/U 4.14
DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_9",
                                bpf_cgroup_egress_4_9, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, KVER_NONE, SDK_LEVEL_T);
}

// -----

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all its traffic is accounted for already
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // CLAT sockets are created by the system server and tagged as uid CLAT, see
    // tagSocketAsClat().
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return XTBPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, EGRESS, KVER_NONE);
    return XTBPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted for by virtue of an iptables raw prerouting drop
    // rule (in the clat_raw_PREROUTING chain), which triggers before this (in the
    // bw_raw_PREROUTING chain).  It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    return XTBPF_MATCH;
}

DEFINE_SYS_BPF_PROG("schedact/ingress/account",
                    tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return XTBPF_MATCH;

    // The kernel's DEFAULT_OVERFLOWUID is 65534, the overflow 'nobody' uid.
    // Usually this being returned means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which is_system_uid() considers system.
    if (sock_uid == 65534) return XTBPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? XTBPF_MATCH : XTBPF_NOMATCH;
    return XTBPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    uint32_t penalty_box = PENALTY_BOX_USER_MATCH | PENALTY_BOX_ADMIN_MATCH;
    if (denylistMatch) return denylistMatch->rule & penalty_box ? XTBPF_MATCH : XTBPF_NOMATCH;
    return XTBPF_NOMATCH;
}

static __always_inline inline uint8_t get_app_permissions() {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permissions are granted to an app for all
     * users at install time, so we only check the appId part of a request uid at
     * run time. See UserHandle#isSameApp for detail.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
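    // e.g. uid 1010123 (user 10, app id 10123) and uid 10123 (user 0, same app) both
    // yield appId 10123 and thus share a single uid_permission_map entry.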
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    // if UID not in map, then default to just INTERNET permission.
    return permissions ? *permissions : BPF_PERMISSION_INTERNET;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet_create", inet_socket_create, KVER_4_14)
(__unused struct bpf_sock* sk) {
    return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? BPF_ALLOW : BPF_DISALLOW;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsockrelease/inet_release", inet_socket_release, KVER_5_10)
(struct bpf_sock* sk) {
    uint64_t cookie = bpf_get_sk_cookie(sk);
    if (cookie) bpf_cookie_tag_map_delete_elem(&cookie);

    return 1;
}

static __always_inline inline int check_localhost(__unused struct bpf_sock_addr *ctx) {
    // See include/uapi/linux/bpf.h:
    //
    // struct bpf_sock_addr {
    //   __u32 user_family;    //     R: 4 byte
    //   __u32 user_ip4;       // BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 user_ip6[4];    // BE, R: 1,2,4,8-byte, W: 4,8-byte
    //   __u32 user_port;      // BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 family;         //     R: 4 byte
    //   __u32 type;           //     R: 4 byte
    //   __u32 protocol;       //     R: 4 byte
    //   __u32 msg_src_ip4;    // BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 msg_src_ip6[4]; // BE, R: 1,2,4,8-byte, W: 4,8-byte
    //   __bpf_md_ptr(struct bpf_sock *, sk);
    // };
    return BPF_ALLOW;
}

static inline __always_inline int block_port(struct bpf_sock_addr *ctx) {
    if (!ctx->user_port) return BPF_ALLOW;

    switch (ctx->protocol) {
        case IPPROTO_TCP:
        case IPPROTO_MPTCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_DCCP:
        case IPPROTO_SCTP:
            break;
        default:
            return BPF_ALLOW; // unknown protocols are allowed
    }

    int key = ctx->user_port >> 6;
    int shift = ctx->user_port & 63;
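    // The map is a bitmap over all 64K 16-bit port values: bit (user_port & 63) within
    // the u64 at index (user_port >> 6).  Note that ctx->user_port is in network byte
    // order (see the struct bpf_sock_addr comment above), so whoever populates
    // blocked_ports_map presumably sets bits at the network-byte-order port value.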

    uint64_t *val = bpf_blocked_ports_map_lookup_elem(&key);
    // Lookup should never fail in reality, but if it does return here to keep the
    // BPF verifier happy.
    if (!val) return BPF_ALLOW;

    if ((*val >> shift) & 1) return BPF_DISALLOW;
    return BPF_ALLOW;
}

DEFINE_NETD_BPF_PROG_KVER("bind4/inet4_bind", inet4_bind, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return block_port(ctx);
}

DEFINE_NETD_BPF_PROG_KVER("bind6/inet6_bind", inet6_bind, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return block_port(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("connect4/inet4_connect", inet4_connect, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("connect6/inet6_connect", inet6_connect, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("recvmsg4/udp4_recvmsg", udp4_recvmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("recvmsg6/udp6_recvmsg", udp6_recvmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("sendmsg4/udp4_sendmsg", udp4_sendmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("sendmsg6/udp6_sendmsg", udp6_sendmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("getsockopt/prog", getsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
    // Tell the kernel to return the 'original' kernel reply (instead of the bpf modified buffer).
    // This is important if the answer is larger than PAGE_SIZE (max size this bpf hook can provide).
    ctx->optlen = 0;
    return BPF_ALLOW;
}

DEFINE_NETD_V_BPF_PROG_KVER("setsockopt/prog", setsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
    // Tell the kernel to use/process the original buffer provided by userspace.
    // This is important if it is larger than PAGE_SIZE (max size this bpf hook can handle).
    ctx->optlen = 0;
    return BPF_ALLOW;
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity and netd");
889