• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Common BPF helpers to be used by all BPF programs loaded by Android */
2 
3 #include <linux/bpf.h>
4 #include <stdbool.h>
5 #include <stdint.h>
6 
7 #include "bpf_map_def.h"
8 
9 /******************************************************************************
10  * WARNING: CHANGES TO THIS FILE OUTSIDE OF AOSP/MASTER ARE LIKELY TO BREAK   *
11  * DEVICE COMPATIBILITY WITH MAINLINE MODULES SHIPPING EBPF CODE.             *
12  *                                                                            *
13  * THIS WILL LIKELY RESULT IN BRICKED DEVICES AT SOME ARBITRARY FUTURE TIME   *
14  *                                                                            *
15  * THAT GOES ESPECIALLY FOR THE 'SECTION' 'LICENSE' AND 'CRITICAL' MACROS     *
16  *                                                                            *
17  * We strongly suggest that if you need changes to bpfloader functionality    *
18  * you get your changes reviewed and accepted into aosp/master.               *
19  *                                                                            *
20  ******************************************************************************/
21 
// The actual versions of the bpfloader that shipped in various Android releases

// Android P/Q/R: BpfLoader was initially part of netd,
// this was later split out into a standalone binary, but was unversioned.

// Android S / 12 (api level 31) - added 'tethering' mainline eBPF support
#define BPFLOADER_S_VERSION 2u

// Android T / 13 Beta 3 (api level 33) - added support for 'netd_shared'
#define BPFLOADER_T_BETA3_VERSION 13u

/* For mainline module use, you can #define BPFLOADER_{MIN/MAX}_VER
 * before #include "bpf_helpers.h" to change which bpfloaders will
 * process the resulting .o file.
 *
 * While this will work outside of mainline too, there just is no point to
 * using it when the .o and the bpfloader ship in sync with each other.
 */
// If the including .c file did not pick an explicit range, fall back to the
// platform defaults (DEFAULT_BPFLOADER_{MIN,MAX}_VER — presumably provided by
// "bpf_map_def.h" included above; verify against that header).
#ifndef BPFLOADER_MIN_VER
#define BPFLOADER_MIN_VER DEFAULT_BPFLOADER_MIN_VER
#endif

#ifndef BPFLOADER_MAX_VER
#define BPFLOADER_MAX_VER DEFAULT_BPFLOADER_MAX_VER
#endif
47 
/* place things in different elf sections */
// 'used' keeps the symbol alive even when nothing in the translation unit
// references it — the consumer is the bpfloader reading the .o, not the code.
#define SECTION(NAME) __attribute__((section(NAME), used))
50 
/* Must be present in every program, example usage:
 *   LICENSE("GPL"); or LICENSE("Apache 2.0");
 *
 * We also take this opportunity to embed a bunch of other useful values in
 * the resulting .o (This is to enable some limited forward compatibility
 * with mainline module shipped ebpf programs)
 *
 * The bpfloader_{min/max}_ver defines the [min, max) range of bpfloader
 * versions that should load this .o file (bpfloaders outside of this range
 * will simply ignore/skip this *entire* .o)
 * The [inclusive,exclusive) matches what we do for kernel ver dependencies.
 *
 * The size_of_bpf_{map,prog}_def allow the bpfloader to load programs where
 * these structures have been extended with additional fields (they will of
 * course simply be ignored then).
 *
 * If missing, bpfloader_{min/max}_ver default to 0/0x10000 ie. [v0.0, v1.0),
 * while size_of_bpf_{map/prog}_def default to 32/20 which are the v0.0 sizes.
 */
// Each value lands in its own dedicated ELF section (via SECTION) so the
// bpfloader can locate it by section name without parsing C symbols.
#define LICENSE(NAME)                                                                           \
    unsigned int _bpfloader_min_ver SECTION("bpfloader_min_ver") = BPFLOADER_MIN_VER;           \
    unsigned int _bpfloader_max_ver SECTION("bpfloader_max_ver") = BPFLOADER_MAX_VER;           \
    size_t _size_of_bpf_map_def SECTION("size_of_bpf_map_def") = sizeof(struct bpf_map_def);    \
    size_t _size_of_bpf_prog_def SECTION("size_of_bpf_prog_def") = sizeof(struct bpf_prog_def); \
    char _license[] SECTION("license") = (NAME)

/* flag the resulting bpf .o file as critical to system functionality,
 * loading all kernel version appropriate programs in it must succeed
 * for bpfloader success
 */
#define CRITICAL(REASON) char _critical[] SECTION("critical") = (REASON)
82 
83 /*
84  * Helper functions called from eBPF programs written in C. These are
85  * implemented in the kernel sources.
86  */
87 
/* Kernel version encoding: a single 32-bit value packed as
 * (major << 24) + (minor << 16) + sub.  KVER_NONE/KVER_INF are the
 * "no lower bound" / "no upper bound" sentinels for [min_kver, max_kver)
 * ranges used in the map/prog definition structs below.
 * NOTE(review): operands are not masked, so an oversized minor or sub
 * value would carry into higher fields — callers are expected to pass
 * in-range components.
 */
#define KVER_NONE 0
#define KVER(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define KVER_INF 0xFFFFFFFFu
91 
/* generic functions */

/*
 * Type-unsafe bpf map functions - avoid if possible.
 *
 * Using these it is possible to pass in keys/values of the wrong type/size,
 * or, for 'bpf_map_lookup_elem_unsafe' receive into a pointer to the wrong type.
 * You will not get a compile time failure, and for certain types of errors you
 * might not even get a failure from the kernel's ebpf verifier during program load,
 * instead stuff might just not work right at runtime.
 *
 * Instead please use:
 *   DEFINE_BPF_MAP(foo_map, TYPE, KeyType, ValueType, num_entries)
 * where TYPE can be something like HASH or ARRAY, and num_entries is an integer.
 *
 * This defines the map (hence this should not be used in a header file included
 * from multiple locations) and provides type safe accessors:
 *   ValueType * bpf_foo_map_lookup_elem(const KeyType *)
 *   int bpf_foo_map_update_elem(const KeyType *, const ValueType *, flags)
 *   int bpf_foo_map_delete_elem(const KeyType *)
 *
 * This will make sure that if you change the type of a map you'll get compile
 * errors at any spots you forget to update with the new type.
 *
 * Note: these all take pointers to const map because from the C/eBPF point of view
 * the map struct is really just a readonly map definition of the in kernel object.
 * Runtime modification of the map defining struct is meaningless, since
 * the contents is only ever used during bpf program loading & map creation
 * by the bpf loader, and not by the eBPF program itself.
 */
// Standard eBPF helper-call idiom: the "function pointer" value is the numeric
// helper id (BPF_FUNC_*), which the BPF compiler backend turns into a helper
// call instruction rather than an actual indirect call.
static void* (*bpf_map_lookup_elem_unsafe)(const struct bpf_map_def* map,
                                           const void* key) = (void*)BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem_unsafe)(const struct bpf_map_def* map, const void* key,
                                         const void* value, unsigned long long flags) = (void*)
        BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem_unsafe)(const struct bpf_map_def* map,
                                         const void* key) = (void*)BPF_FUNC_map_delete_elem;
129 
// Emits an (otherwise unused) dummy struct instance into a ".maps.<name>"
// section so that BTF-aware tooling can recover the map's key/value types
// from the debug info of the emitted object file.
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)  \
        struct ____btf_map_##name {                     \
                type_key key;                           \
                type_val value;                         \
        };                                              \
        struct ____btf_map_##name                       \
        __attribute__ ((section(".maps." #name), used)) \
                ____btf_map_##name = { }
138 
/* type safe macro to declare a map and related accessor functions */
// Most-general map definition macro.  Emits:
//   - a 'struct bpf_map_def' named 'the_map' in the "maps" ELF section,
//     carrying type/sizes/ownership (uid/gid/mode), bpfloader + kernel
//     version ranges, selinux context, pin subdirectory, and shared flag;
//   - a BTF key/value annotation (BPF_ANNOTATE_KV_PAIR);
//   - three type-safe inline accessors, bpf_<the_map>_{lookup,update,delete}_elem,
//     which wrap the *_unsafe helper-call pointers declared above.
// Note the version range here is always DEFAULT_BPFLOADER_{MIN,MAX}_VER,
// independent of any BPFLOADER_{MIN,MAX}_VER override used by LICENSE().
#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md,         \
                           selinux, pindir, share)                                               \
    const struct bpf_map_def SECTION("maps") the_map = {                                         \
            .type = BPF_MAP_TYPE_##TYPE,                                                         \
            .key_size = sizeof(KeyType),                                                         \
            .value_size = sizeof(ValueType),                                                     \
            .max_entries = (num_entries),                                                        \
            .map_flags = 0,                                                                      \
            .uid = (usr),                                                                        \
            .gid = (grp),                                                                        \
            .mode = (md),                                                                        \
            .bpfloader_min_ver = DEFAULT_BPFLOADER_MIN_VER,                                      \
            .bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER,                                      \
            .min_kver = KVER_NONE,                                                               \
            .max_kver = KVER_INF,                                                                \
            .selinux_context = selinux,                                                          \
            .pin_subdir = pindir,                                                                \
            .shared = share,                                                                     \
    };                                                                                           \
    BPF_ANNOTATE_KV_PAIR(the_map, KeyType, ValueType);                                           \
                                                                                                 \
    static inline __always_inline __unused ValueType* bpf_##the_map##_lookup_elem(               \
            const KeyType* k) {                                                                  \
        return bpf_map_lookup_elem_unsafe(&the_map, k);                                          \
    };                                                                                           \
                                                                                                 \
    static inline __always_inline __unused int bpf_##the_map##_update_elem(                      \
            const KeyType* k, const ValueType* v, unsigned long long flags) {                    \
        return bpf_map_update_elem_unsafe(&the_map, k, v, flags);                                \
    };                                                                                           \
                                                                                                 \
    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const KeyType* k) {   \
        return bpf_map_delete_elem_unsafe(&the_map, k);                                          \
    };
174 
// Convenience wrappers over DEFINE_BPF_MAP_EXT with no selinux context,
// no pin subdirectory, and not shared.
#define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, "", "", false)

// root-only: rw------- root root
#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, AID_ROOT, 0600)

// group write-only: rw--w---- root gid
#define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0620)

// group read-only: rw-r----- root gid
#define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0640)

// group read-write: rw-rw---- root gid
#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0660)
189 
// Generic kernel eBPF helper-call bindings (same helper-id-as-pointer idiom
// as the map accessors above).  Availability of each helper depends on the
// kernel version the program ends up loaded on.
static int (*bpf_probe_read)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read;
static int (*bpf_probe_read_str)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str;
static unsigned long long (*bpf_ktime_get_ns)(void) = (void*) BPF_FUNC_ktime_get_ns;
static unsigned long long (*bpf_ktime_get_boot_ns)(void) = (void*)BPF_FUNC_ktime_get_boot_ns;
static int (*bpf_trace_printk)(const char* fmt, int fmt_size, ...) = (void*) BPF_FUNC_trace_printk;
static unsigned long long (*bpf_get_current_pid_tgid)(void) = (void*) BPF_FUNC_get_current_pid_tgid;
static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid;
static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id;
198 
// Most-general program definition macro.  Emits a 'struct bpf_prog_def'
// named '<the_prog>_def' in the "progs" ELF section (ownership, kernel
// version range, optionality, bpfloader version range, selinux context,
// pin subdirectory), then opens the program function itself, placed in
// SECTION_NAME.  The macro invocation is followed by the function's
// argument list and body, e.g.:
//   DEFINE_BPF_PROG_EXT(...)(struct __sk_buff* skb) { ... }
#define DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, opt,       \
                            selinux, pindir)                                                       \
    const struct bpf_prog_def SECTION("progs") the_prog##_def = {                                  \
            .uid = (prog_uid),                                                                     \
            .gid = (prog_gid),                                                                     \
            .min_kver = (min_kv),                                                                  \
            .max_kver = (max_kv),                                                                  \
            .optional = (opt),                                                                     \
            .bpfloader_min_ver = DEFAULT_BPFLOADER_MIN_VER,                                        \
            .bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER,                                        \
            .selinux_context = selinux,                                                            \
            .pin_subdir = pindir,                                                                  \
    };                                                                                             \
    SECTION(SECTION_NAME)                                                                          \
    int the_prog
214 
// As DEFINE_BPF_PROG_EXT, but with no selinux context and no pin subdirectory.
#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                       opt) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, opt, "", "")
218 
// Programs (here used in the sense of functions/sections) marked optional are allowed to fail
// to load (for example due to missing kernel patches).
// The bpfloader will just ignore these failures and continue processing the next section.
//
// A non-optional program (function/section) failing to load causes a failure and aborts
// processing of the entire .o, if the .o is additionally marked critical, this will result
// in the entire bpfloader process terminating with a failure and not setting the bpf.progs_loaded
// system property.  This in turn results in waitForProgsLoaded() never finishing.
//
// ie. a non-optional program in a critical .o is mandatory for kernels matching the min/max kver.

// programs requiring a kernel version >= min_kv && < max_kv
#define DEFINE_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                   false)
#define DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, \
                                            max_kv)                                             \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, true)

// programs requiring a kernel version >= min_kv (no upper bound: KVER_INF)
#define DEFINE_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv)                 \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
                                   false)
#define DEFINE_OPTIONAL_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv)        \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
                                   true)

// programs with no kernel version requirements (full range [0, KVER_INF))
#define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, false)
#define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, true)
251