#ifndef __BPF_API__
#define __BPF_API__

/* Note:
 *
 * This file can be included into eBPF kernel programs. It contains
 * a couple of useful helper functions, map/section ABI (bpf_elf.h),
 * misc macros and some eBPF specific LLVM built-ins.
 */

#include <stdint.h>

#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include <asm/byteorder.h>

#include "bpf_elf.h"

/** Misc macros. */

#ifndef __stringify
# define __stringify(X)		#X
#endif

#ifndef __maybe_unused
# define __maybe_unused		__attribute__((__unused__))
#endif

#ifndef offsetof
# define offsetof(TYPE, MEMBER)	__builtin_offsetof(TYPE, MEMBER)
#endif

#ifndef likely
# define likely(X)		__builtin_expect(!!(X), 1)
#endif

#ifndef unlikely
# define unlikely(X)		__builtin_expect(!!(X), 0)
#endif

#ifndef htons
# define htons(X)		__constant_htons((X))
#endif

#ifndef ntohs
# define ntohs(X)		__constant_ntohs((X))
#endif

#ifndef htonl
# define htonl(X)		__constant_htonl((X))
#endif

#ifndef ntohl
# define ntohl(X)		__constant_ntohl((X))
#endif

#ifndef __inline__
# define __inline__		__attribute__((always_inline))
#endif

/** Section helper macros. */

#ifndef __section
# define __section(NAME) \
	__attribute__((section(NAME), used))
#endif

#ifndef __section_tail
# define __section_tail(ID, KEY) \
	__section(__stringify(ID) "/" __stringify(KEY))
#endif

#ifndef __section_xdp_entry
# define __section_xdp_entry \
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_cls_entry
# define __section_cls_entry \
	__section(ELF_SECTION_CLASSIFIER)
#endif

#ifndef __section_act_entry
# define __section_act_entry \
	__section(ELF_SECTION_ACTION)
#endif

#ifndef __section_lwt_entry
# define __section_lwt_entry \
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_license
# define __section_license \
	__section(ELF_SECTION_LICENSE)
#endif

#ifndef __section_maps
# define __section_maps \
	__section(ELF_SECTION_MAPS)
#endif

/** Declaration helper macros. */

#ifndef BPF_LICENSE
# define BPF_LICENSE(NAME) \
	char ____license[] __section_license = NAME
#endif

/** Classifier helper */

#ifndef BPF_H_DEFAULT
# define BPF_H_DEFAULT	-1
#endif

/** BPF helper functions for tc. Individual flags are in linux/bpf.h */

#ifndef __BPF_FUNC
# define __BPF_FUNC(NAME, ...) \
	(* NAME)(__VA_ARGS__) __maybe_unused
#endif

#ifndef BPF_FUNC
# define BPF_FUNC(NAME, ...) \
	__BPF_FUNC(NAME, __VA_ARGS__) = (void *) BPF_FUNC_##NAME
#endif

/* Map access/manipulation */
static void *BPF_FUNC(map_lookup_elem, void *map, const void *key);
static int BPF_FUNC(map_update_elem, void *map, const void *key,
		    const void *value, uint32_t flags);
static int BPF_FUNC(map_delete_elem, void *map, const void *key);

/* Time access */
static uint64_t BPF_FUNC(ktime_get_ns);

/* Debugging */

/* FIXME: __attribute__ ((format(printf, 1, 3))) not possible unless
 * llvm bug https://llvm.org/bugs/show_bug.cgi?id=26243 gets resolved.
 * It would require ____fmt to be made const, which generates a reloc
 * entry (non-map).
 */
static void BPF_FUNC(trace_printk, const char *fmt, int fmt_size, ...);

#ifndef printt
# define printt(fmt, ...) \
	({ \
		char ____fmt[] = fmt; \
		trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
	})
#endif
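
/* Usage sketch (not compiled in): a pinned array map declared through
 * __section_maps, read with map_lookup_elem() and updated atomically, with
 * printt() used for error reporting. The names "acc_map" and "account_data"
 * and the choice of two slots are purely illustrative; struct bpf_elf_map
 * and PIN_GLOBAL_NS come from bpf_elf.h, and lock_xadd() is defined further
 * down in this file.
 */
#if 0
struct bpf_elf_map acc_map __section_maps = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.size_key	= sizeof(uint32_t),
	.size_value	= sizeof(uint32_t),
	.pinning	= PIN_GLOBAL_NS,
	.max_elem	= 2,
};

static __inline__ void account_data(struct __sk_buff *skb, uint32_t dir)
{
	uint32_t *bytes;

	/* Per-direction byte counter; slot 0/1 chosen by the caller. */
	bytes = map_lookup_elem(&acc_map, &dir);
	if (bytes)
		lock_xadd(bytes, skb->len);
	else
		printt("acc_map has no slot for dir %u\n", dir);
}
#endif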

/* Random numbers */
static uint32_t BPF_FUNC(get_prandom_u32);

/* Tail calls */
static void BPF_FUNC(tail_call, struct __sk_buff *skb, void *map,
		     uint32_t index);

/* System helpers */
static uint32_t BPF_FUNC(get_smp_processor_id);
static uint32_t BPF_FUNC(get_numa_node_id);

/* Packet misc meta data */
static uint32_t BPF_FUNC(get_cgroup_classid, struct __sk_buff *skb);
static int BPF_FUNC(skb_under_cgroup, void *map, uint32_t index);

static uint32_t BPF_FUNC(get_route_realm, struct __sk_buff *skb);
static uint32_t BPF_FUNC(get_hash_recalc, struct __sk_buff *skb);
static uint32_t BPF_FUNC(set_hash_invalid, struct __sk_buff *skb);

/* Packet redirection */
static int BPF_FUNC(redirect, int ifindex, uint32_t flags);
static int BPF_FUNC(clone_redirect, struct __sk_buff *skb, int ifindex,
		    uint32_t flags);

/* Packet manipulation */
static int BPF_FUNC(skb_load_bytes, struct __sk_buff *skb, uint32_t off,
		    void *to, uint32_t len);
static int BPF_FUNC(skb_store_bytes, struct __sk_buff *skb, uint32_t off,
		    const void *from, uint32_t len, uint32_t flags);

static int BPF_FUNC(l3_csum_replace, struct __sk_buff *skb, uint32_t off,
		    uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(l4_csum_replace, struct __sk_buff *skb, uint32_t off,
		    uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(csum_diff, const void *from, uint32_t from_size,
		    const void *to, uint32_t to_size, uint32_t seed);
static int BPF_FUNC(csum_update, struct __sk_buff *skb, uint32_t wsum);

static int BPF_FUNC(skb_change_type, struct __sk_buff *skb, uint32_t type);
static int BPF_FUNC(skb_change_proto, struct __sk_buff *skb, uint32_t proto,
		    uint32_t flags);
static int BPF_FUNC(skb_change_tail, struct __sk_buff *skb, uint32_t nlen,
		    uint32_t flags);

static int BPF_FUNC(skb_pull_data, struct __sk_buff *skb, uint32_t len);

/* Event notification */
static int __BPF_FUNC(skb_event_output, struct __sk_buff *skb, void *map,
		      uint64_t index, const void *data, uint32_t size) =
		      (void *) BPF_FUNC_perf_event_output;

/* Packet vlan encap/decap */
static int BPF_FUNC(skb_vlan_push, struct __sk_buff *skb, uint16_t proto,
		    uint16_t vlan_tci);
static int BPF_FUNC(skb_vlan_pop, struct __sk_buff *skb);

/* Packet tunnel encap/decap */
static int BPF_FUNC(skb_get_tunnel_key, struct __sk_buff *skb,
		    struct bpf_tunnel_key *to, uint32_t size, uint32_t flags);
static int BPF_FUNC(skb_set_tunnel_key, struct __sk_buff *skb,
		    const struct bpf_tunnel_key *from, uint32_t size,
		    uint32_t flags);

static int BPF_FUNC(skb_get_tunnel_opt, struct __sk_buff *skb,
		    void *to, uint32_t size);
static int BPF_FUNC(skb_set_tunnel_opt, struct __sk_buff *skb,
		    const void *from, uint32_t size);
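
/* Minimal classifier sketch (not compiled in): an entry point placed into
 * the classifier ELF section via __section_cls_entry, reading the EtherType
 * with skb_load_bytes() and returning a tc verdict from linux/pkt_cls.h.
 * The offset of 12 assumes an untagged Ethernet frame, "cls_entry" is an
 * illustrative name, and 0x0800 stands in for ETH_P_IP (linux/if_ether.h
 * is not pulled in by this header). A real object also carries a license
 * string via BPF_LICENSE().
 */
#if 0
__section_cls_entry
int cls_entry(struct __sk_buff *skb)
{
	uint16_t proto;

	/* Drop frames whose EtherType cannot be read at all. */
	if (skb_load_bytes(skb, 12, &proto, sizeof(proto)) < 0)
		return TC_ACT_SHOT;

	/* skb_load_bytes() copies raw wire data, hence the htons(). */
	if (proto == htons(0x0800) /* ETH_P_IP */)
		printt("IPv4 packet on ifindex %u\n", skb->ifindex);

	return TC_ACT_OK;
}

BPF_LICENSE("GPL");
#endif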

/** LLVM built-ins, mem*() routines work for constant size */

#ifndef lock_xadd
# define lock_xadd(ptr, val)	((void) __sync_fetch_and_add(ptr, val))
#endif

#ifndef memset
# define memset(s, c, n)	__builtin_memset((s), (c), (n))
#endif

#ifndef memcpy
# define memcpy(d, s, n)	__builtin_memcpy((d), (s), (n))
#endif

#ifndef memmove
# define memmove(d, s, n)	__builtin_memmove((d), (s), (n))
#endif

/* FIXME: __builtin_memcmp() is not yet fully useable unless llvm bug
 * https://llvm.org/bugs/show_bug.cgi?id=26218 gets resolved. Also
 * this one would generate a reloc entry (non-map), otherwise.
 */
#if 0
#ifndef memcmp
# define memcmp(a, b, n)	__builtin_memcmp((a), (b), (n))
#endif
#endif

unsigned long long load_byte(void *skb, unsigned long long off)
	asm ("llvm.bpf.load.byte");

unsigned long long load_half(void *skb, unsigned long long off)
	asm ("llvm.bpf.load.half");

unsigned long long load_word(void *skb, unsigned long long off)
	asm ("llvm.bpf.load.word");

#endif /* __BPF_API__ */
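
/* Sketch (not compiled in): the llvm.bpf.load.* built-ins declared above
 * take the skb context plus an absolute byte offset and, unlike
 * skb_load_bytes(), already return the value in host byte order. The
 * offsets below assume an untagged Ethernet frame followed by an IPv4
 * header; "is_tcp" is an illustrative name.
 */
#if 0
static __inline__ int is_tcp(struct __sk_buff *skb)
{
	/* EtherType at offset 12, already converted to host byte order. */
	if (load_half(skb, 12) != 0x0800 /* ETH_P_IP */)
		return 0;

	/* IPv4 protocol field: 14 bytes of Ethernet plus 9 bytes into the
	 * IP header (no VLAN tag assumed).
	 */
	return load_byte(skb, 14 + 9) == 6 /* IPPROTO_TCP */;
}
#endif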