/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This header file, together with netd.c, is used to compile the eBPF kernel
 * program.
 */

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <stdbool.h>
#include <stdint.h>
#include "netdbpf/bpf_shared.h"

typedef struct {
    uint32_t uid;
    uint32_t tag;
} uid_tag;

typedef struct {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;
} stats_key;

typedef struct {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
} stats_value;

typedef struct {
    char name[IFNAMSIZ];
} IfaceValue;

// These are defined for the cgroup bpf filter only.
#define BPF_PASS 1
#define BPF_DROP 0

// These are used for the xt_bpf program only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2

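// BPF maps used below: per-socket-cookie tags, per-uid counter sets,
// per-uid/per-interface traffic accounting, runtime configuration, and
// per-uid firewall (owner) rules.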
DEFINE_BPF_MAP(cookie_tag_map, HASH, uint64_t, uid_tag, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP(app_uid_stats_map, HASH, uint32_t, stats_value, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP(stats_map_A, HASH, stats_key, stats_value, STATS_MAP_SIZE)
DEFINE_BPF_MAP(stats_map_B, HASH, stats_key, stats_value, STATS_MAP_SIZE)
DEFINE_BPF_MAP(iface_stats_map, HASH, uint32_t, stats_value, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE)
DEFINE_BPF_MAP(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_ACCESSORS(iface_index_name_map, HASH, uint32_t, IfaceValue,
                            IFACE_INDEX_NAME_MAP_SIZE)

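// Returns 1 when the uid falls within the reserved system UID range
// [MIN_SYSTEM_UID, MAX_SYSTEM_UID], 0 otherwise.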
static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

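// Generates an update_<map> helper that looks up (or creates) the stats entry
// for `key` in <map> and atomically adds one packet and skb->len bytes to the
// tx counters (egress) or rx counters (ingress).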
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                          \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,           \
                                                              int direction, TypeOfKey* key) { \
        stats_value* value;                                                                    \
        value = bpf_##the_stats_map##_lookup_elem(key);                                        \
        if (!value) {                                                                          \
            stats_value newValue = {};                                                         \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                    \
            value = bpf_##the_stats_map##_lookup_elem(key);                                    \
        }                                                                                      \
        if (value) {                                                                           \
            if (direction == BPF_EGRESS) {                                                     \
                __sync_fetch_and_add(&value->txPackets, 1);                                    \
                __sync_fetch_and_add(&value->txBytes, skb->len);                               \
            } else if (direction == BPF_INGRESS) {                                             \
                __sync_fetch_and_add(&value->rxPackets, 1);                                    \
                __sync_fetch_and_add(&value->rxBytes, skb->len);                               \
            }                                                                                  \
        }                                                                                      \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, stats_key)
DEFINE_UPDATE_STATS(stats_map_B, stats_key)

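// Returns true when the owner-UID check should be skipped for this packet:
// ESP (IPsec) packets and TCP segments with the RST flag set, for both IPv4
// and IPv6.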
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == ETH_P_IP) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint16_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == ETH_P_IPV6) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint16_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

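// Reads a BpfConfig entry from configuration_map; falls back to
// DEFAULT_CONFIG when the entry is missing.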
static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

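// Owner-UID firewall check: returns BPF_PASS or BPF_DROP based on the
// enabled DOZABLE/STANDBY/POWERSAVE rules and, on ingress, on the uid's
// allowed input interface (IIF_MATCH). System UIDs and packets selected by
// skip_owner_match() always pass.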
static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
        // Drop packets that arrive neither on loopback (ifindex 1) nor on the
        // uid's allowed interface.
        if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
            return BPF_DROP;
        }
    }
    return BPF_PASS;
}

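// Routes a stats update to stats_map_A or stats_map_B according to the map
// currently selected by configuration (SELECT_MAP_A / SELECT_MAP_B).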
static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            stats_key* key, uint8_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

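// Main accounting routine: computes the owner-UID verdict for the packet and,
// unless an egress packet is being dropped, charges it to the currently
// selected (uid, tag, counterSet, iface) stats map (both the tagged and the
// untagged entry) and to app_uid_stats_map. Returns BPF_PASS or BPF_DROP.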
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = bpf_get_socket_cookie(skb);
    uid_tag* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!selectedMap) {
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    return match;
}