1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 /*
18 * This h file together with bpf_kern.c is used for compiling the eBPF kernel
19 * program. To generate the bpf_kern.o file manually, use the clang prebuilt in
20 * this android tree to compile the files with --target=bpf options. For
21 * example, in system/netd/ directory, execute the following command:
22 * $: ANDROID_BASE_DIRECTORY/prebuilts/clang/host/linux-x86/clang-4691093/bin/clang \
23 * -I ANDROID_BASE_DIRECTORY/bionic/libc/kernel/uapi/ \
24 * -I ANDROID_BASE_DIRECTORY/system/netd/bpfloader/ \
25 * -I ANDROID_BASE_DIRECTORY/bionic/libc/kernel/android/uapi/ \
26 * -I ANDROID_BASE_DIRECTORY/bionic/libc/include \
27 * -I ANDROID_BASE_DIRECTORY/system/netd/libbpf/include \
28 * --target=bpf -O2 -c bpfloader/bpf_kern.c -o bpfloader/bpf_kern.o
29 */
30
31 #include <linux/bpf.h>
32 #include <linux/if_ether.h>
33 #include <linux/in.h>
34 #include <linux/in6.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <stdint.h>
38 #include "bpf/bpf_shared.h"
39
40 #define ELF_SEC(NAME) __attribute__((section(NAME), used))
41
/*
 * Value type of COOKIE_TAG_MAP: the uid and qtaguid tag associated with a
 * socket cookie. Layout is shared with userspace (bpf_shared.h consumers);
 * do not reorder fields.
 */
struct uid_tag {
    uint32_t uid;  // owner uid of the socket
    uint32_t tag;  // qtaguid tag set on the socket (0 if untagged)
};
46
/*
 * Key type of the per-(uid, tag, counterSet, iface) stats maps
 * (TAG_STATS_MAP / UID_STATS_MAP). Layout is shared with userspace;
 * do not reorder fields.
 */
struct stats_key {
    uint32_t uid;         // owner uid the traffic is attributed to
    uint32_t tag;         // qtaguid tag (0 for the untagged bucket)
    uint32_t counterSet;  // foreground/background counter set index
    uint32_t ifaceIndex;  // kernel ifindex the packet traversed
};
53
/*
 * Value type of the stats maps: cumulative packet/byte counters, updated
 * atomically from the eBPF programs. Layout is shared with userspace;
 * do not reorder fields.
 */
struct stats_value {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
};
60
/* helper functions called from eBPF programs written in C */
/*
 * Each pointer is initialized to a BPF_FUNC_* helper id; at BPF load time the
 * kernel rewrites the call sites to invoke the in-kernel helper. These are
 * never valid userspace function pointers.
 */
// bpf_map_lookup_elem: returns pointer to the value for |key|, or NULL.
static void* (*find_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem;
// bpf_map_update_elem: inserts/updates |key| -> |value| per |flags| (e.g. BPF_NOEXIST).
static int (*write_to_map_entry)(uint64_t map, void* key, void* value,
                                 uint64_t flags) = (void*)BPF_FUNC_map_update_elem;
// bpf_map_delete_elem: removes |key| from |map|.
static int (*delete_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_delete_elem;
// bpf_get_socket_cookie: stable per-socket cookie for the skb's socket.
static uint64_t (*get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
// bpf_get_socket_uid: uid owning the skb's socket (overflowuid if none).
static uint32_t (*get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
// bpf_skb_load_bytes: copies |len| bytes at |off| from the packet into |to|; 0 on success.
static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;
/* Program verdicts returned to the cgroup skb hook: keep or drop the packet. */
#define BPF_PASS 1
#define BPF_DROP 0
/* Traffic direction relative to the device. */
#define BPF_EGRESS 0
#define BPF_INGRESS 1

/* Byte offset of the L4 protocol field inside the IPv4 / IPv6 header. */
#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
/* The IPv4 version/IHL byte is the first byte of the header. */
#define IPPROTO_IHL_OFF 0
/* Byte offset of the flags byte within the TCP header. */
#define TCP_FLAG_OFF 13
/* Bit position of the RST flag within that flags byte. */
#define RST_OFFSET 2
80
bpf_update_stats(struct __sk_buff * skb,uint64_t map,int direction,void * key)81 static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map,
82 int direction, void *key) {
83 struct stats_value* value;
84 value = find_map_entry(map, key);
85 if (!value) {
86 struct stats_value newValue = {};
87 write_to_map_entry(map, key, &newValue, BPF_NOEXIST);
88 value = find_map_entry(map, key);
89 }
90 if (value) {
91 if (direction == BPF_EGRESS) {
92 __sync_fetch_and_add(&value->txPackets, 1);
93 __sync_fetch_and_add(&value->txBytes, skb->len);
94 } else if (direction == BPF_INGRESS) {
95 __sync_fetch_and_add(&value->rxPackets, 1);
96 __sync_fetch_and_add(&value->rxBytes, skb->len);
97 }
98 }
99 }
100
/*
 * Applies the uid-based firewall chains (dozable / standby / powersave) to
 * this packet and returns BPF_PASS or BPF_DROP.
 *
 * ESP packets and TCP RSTs are always passed so that IPsec traffic and
 * connection teardown keep working even for uids a chain would block.
 * System uids in [MIN_SYSTEM_UID, MAX_SYSTEM_UID] always pass.
 */
static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) {
    int ret = 0;
    // NOTE(review): skb->protocol is in network byte order; comparing it
    // against host-order ETH_P_IP / ETH_P_IPV6 only matches on big-endian.
    // Confirm whether these comparisons should use htons().
    if (skb->protocol == ETH_P_IP) {
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, IP_PROTO_OFF, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return BPF_PASS;
            } else if (proto == IPPROTO_TCP) {
                // IHL (low nibble of the first header byte) is the IPv4
                // header length in 32-bit words; it locates the TCP header.
                uint8_t ihl;
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                if (!ret) {
                    ihl = ihl & 0x0F;
                    uint8_t flag;
                    ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                    if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                        return BPF_PASS;
                    }
                }
            }
        }
    } else if (skb->protocol == ETH_P_IPV6) {
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, IPV6_PROTO_OFF, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return BPF_PASS;
            } else if (proto == IPPROTO_TCP) {
                // Assumes a bare 40-byte IPv6 header; extension headers are
                // not parsed here.
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return BPF_PASS;
                }
            }
        }
    }

    // System uids are never subject to the uid firewall chains.
    if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;

    // In each of these maps, the entry with key UID_MAP_ENABLED tells us whether that
    // map is enabled or not.
    // TODO: replace this with a map of size one that contains a config structure defined in
    // bpf_shared.h that can be written by userspace and read here.
    uint32_t mapSettingKey = UID_MAP_ENABLED;
    uint8_t* ownerMatch;
    // Dozable chain: when enabled, only uids with a (nonzero) entry pass.
    uint8_t* mapEnabled = find_map_entry(DOZABLE_UID_MAP, &mapSettingKey);
    if (mapEnabled && *mapEnabled) {
        ownerMatch = find_map_entry(DOZABLE_UID_MAP, &uid);
        if (ownerMatch) return *ownerMatch;
        return BPF_DROP;
    }
    // Standby chain: when enabled, a uid's entry carries its verdict; uids
    // with no entry fall through to the next chain.
    mapEnabled = find_map_entry(STANDBY_UID_MAP, &mapSettingKey);
    if (mapEnabled && *mapEnabled) {
        ownerMatch = find_map_entry(STANDBY_UID_MAP, &uid);
        if (ownerMatch) return *ownerMatch;
    }
    // Powersave chain: when enabled, only uids with a (nonzero) entry pass.
    mapEnabled = find_map_entry(POWERSAVE_UID_MAP, &mapSettingKey);
    if (mapEnabled && *mapEnabled) {
        ownerMatch = find_map_entry(POWERSAVE_UID_MAP, &uid);
        if (ownerMatch) return *ownerMatch;
        return BPF_DROP;
    }
    return BPF_PASS;
}
165
bpf_traffic_account(struct __sk_buff * skb,int direction)166 static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
167 uint32_t sock_uid = get_socket_uid(skb);
168 int match = bpf_owner_match(skb, sock_uid);
169 if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
170 // If an outbound packet is going to be dropped, we do not count that
171 // traffic.
172 return match;
173 }
174
175 uint64_t cookie = get_socket_cookie(skb);
176 struct uid_tag* utag = find_map_entry(COOKIE_TAG_MAP, &cookie);
177 uint32_t uid, tag;
178 if (utag) {
179 uid = utag->uid;
180 tag = utag->tag;
181 } else {
182 uid = sock_uid;
183 tag = 0;
184 }
185
186 struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};
187
188 uint8_t* counterSet = find_map_entry(UID_COUNTERSET_MAP, &uid);
189 if (counterSet) key.counterSet = (uint32_t)*counterSet;
190
191 if (tag) {
192 bpf_update_stats(skb, TAG_STATS_MAP, direction, &key);
193 }
194
195 key.tag = 0;
196 bpf_update_stats(skb, UID_STATS_MAP, direction, &key);
197 bpf_update_stats(skb, APP_UID_STATS_MAP, direction, &uid);
198 return match;
199 }
200