1 /* 2 * Example wrapper around BPF macros. 3 * 4 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org> 5 * Author: Will Drewry <wad@chromium.org> 6 * 7 * The code may be used by anyone for any purpose, 8 * and can serve as a starting point for developing 9 * applications using prctl(PR_SET_SECCOMP, 2, ...). 10 * 11 * No guarantees are provided with respect to the correctness 12 * or functionality of this code. 13 */ 14 #ifndef __BPF_HELPER_H__ 15 #define __BPF_HELPER_H__ 16 17 #include <asm/bitsperlong.h> /* for __BITS_PER_LONG */ 18 #include <endian.h> 19 #include <linux/filter.h> 20 #include <linux/seccomp.h> /* for seccomp_data */ 21 #include <linux/types.h> 22 #include <linux/unistd.h> 23 #include <stddef.h> 24 25 #define BPF_LABELS_MAX 256 26 struct bpf_labels { 27 int count; 28 struct __bpf_label { 29 const char *label; 30 __u32 location; 31 } labels[BPF_LABELS_MAX]; 32 }; 33 34 int bpf_resolve_jumps(struct bpf_labels *labels, 35 struct sock_filter *filter, size_t count); 36 __u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label); 37 void seccomp_bpf_print(struct sock_filter *filter, size_t count); 38 39 #define JUMP_JT 0xff 40 #define JUMP_JF 0xff 41 #define LABEL_JT 0xfe 42 #define LABEL_JF 0xfe 43 44 #define ALLOW \ 45 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW) 46 #define DENY \ 47 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL) 48 #define JUMP(labels, label) \ 49 BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \ 50 JUMP_JT, JUMP_JF) 51 #define LABEL(labels, label) \ 52 BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \ 53 LABEL_JT, LABEL_JF) 54 #define SYSCALL(nr, jt) \ 55 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \ 56 jt 57 58 /* Lame, but just an example */ 59 #define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label) 60 61 #define EXPAND(...) __VA_ARGS__ 62 63 /* Ensure that we load the logically correct offset. */ 64 #if __BYTE_ORDER == __LITTLE_ENDIAN 65 #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) 66 #elif __BYTE_ORDER == __BIG_ENDIAN 67 #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32) 68 #else 69 #error "Unknown endianness" 70 #endif 71 72 /* Map all width-sensitive operations */ 73 #if __BITS_PER_LONG == 32 74 75 #define JEQ(x, jt) JEQ32(x, EXPAND(jt)) 76 #define JNE(x, jt) JNE32(x, EXPAND(jt)) 77 #define JGT(x, jt) JGT32(x, EXPAND(jt)) 78 #define JLT(x, jt) JLT32(x, EXPAND(jt)) 79 #define JGE(x, jt) JGE32(x, EXPAND(jt)) 80 #define JLE(x, jt) JLE32(x, EXPAND(jt)) 81 #define JA(x, jt) JA32(x, EXPAND(jt)) 82 #define ARG(i) ARG_32(i) 83 84 #elif __BITS_PER_LONG == 64 85 86 /* Ensure that we load the logically correct offset. */ 87 #if __BYTE_ORDER == __LITTLE_ENDIAN 88 #define ENDIAN(_lo, _hi) _lo, _hi 89 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32) 90 #elif __BYTE_ORDER == __BIG_ENDIAN 91 #define ENDIAN(_lo, _hi) _hi, _lo 92 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) 93 #endif 94 95 union arg64 { 96 struct { 97 __u32 ENDIAN(lo32, hi32); 98 }; 99 __u64 u64; 100 }; 101 102 #define JEQ(x, jt) \ 103 JEQ64(((union arg64){.u64 = (x)}).lo32, \ 104 ((union arg64){.u64 = (x)}).hi32, \ 105 EXPAND(jt)) 106 #define JGT(x, jt) \ 107 JGT64(((union arg64){.u64 = (x)}).lo32, \ 108 ((union arg64){.u64 = (x)}).hi32, \ 109 EXPAND(jt)) 110 #define JGE(x, jt) \ 111 JGE64(((union arg64){.u64 = (x)}).lo32, \ 112 ((union arg64){.u64 = (x)}).hi32, \ 113 EXPAND(jt)) 114 #define JNE(x, jt) \ 115 JNE64(((union arg64){.u64 = (x)}).lo32, \ 116 ((union arg64){.u64 = (x)}).hi32, \ 117 EXPAND(jt)) 118 #define JLT(x, jt) \ 119 JLT64(((union arg64){.u64 = (x)}).lo32, \ 120 ((union arg64){.u64 = (x)}).hi32, \ 121 EXPAND(jt)) 122 #define JLE(x, jt) \ 123 JLE64(((union arg64){.u64 = (x)}).lo32, \ 124 ((union arg64){.u64 = (x)}).hi32, \ 125 EXPAND(jt)) 126 127 #define JA(x, jt) \ 128 JA64(((union arg64){.u64 = (x)}).lo32, \ 129 ((union arg64){.u64 = (x)}).hi32, \ 130 EXPAND(jt)) 131 #define ARG(i) ARG_64(i) 132 133 #else 134 #error __BITS_PER_LONG value unusable. 135 #endif 136 137 /* Loads the arg into A */ 138 #define ARG_32(idx) \ 139 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)) 140 141 /* Loads lo into M[0] and hi into M[1] and A */ 142 #define ARG_64(idx) \ 143 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \ 144 BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \ 145 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \ 146 BPF_STMT(BPF_ST, 1) /* hi -> M[1] */ 147 148 #define JEQ32(value, jt) \ 149 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \ 150 jt 151 152 #define JNE32(value, jt) \ 153 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \ 154 jt 155 156 #define JA32(value, jt) \ 157 BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ 158 jt 159 160 #define JGE32(value, jt) \ 161 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ 162 jt 163 164 #define JGT32(value, jt) \ 165 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ 166 jt 167 168 #define JLE32(value, jt) \ 169 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ 170 jt 171 172 #define JLT32(value, jt) \ 173 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ 174 jt 175 176 /* 177 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both 178 * A and M[1]. This invariant is kept by restoring A if necessary. 179 */ 180 #define JEQ64(lo, hi, jt) \ 181 /* if (hi != arg.hi) goto NOMATCH; */ \ 182 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ 183 BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ 184 /* if (lo != arg.lo) goto NOMATCH; */ \ 185 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \ 186 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 187 jt, \ 188 BPF_STMT(BPF_LD+BPF_MEM, 1) 189 190 #define JNE64(lo, hi, jt) \ 191 /* if (hi != arg.hi) goto MATCH; */ \ 192 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ 193 BPF_STMT(BPF_LD+BPF_MEM, 0), \ 194 /* if (lo != arg.lo) goto MATCH; */ \ 195 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \ 196 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 197 jt, \ 198 BPF_STMT(BPF_LD+BPF_MEM, 1) 199 200 #define JA64(lo, hi, jt) \ 201 /* if (hi & arg.hi) goto MATCH; */ \ 202 BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \ 203 BPF_STMT(BPF_LD+BPF_MEM, 0), \ 204 /* if (lo & arg.lo) goto MATCH; */ \ 205 BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \ 206 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 207 jt, \ 208 BPF_STMT(BPF_LD+BPF_MEM, 1) 209 210 #define JGE64(lo, hi, jt) \ 211 /* if (hi > arg.hi) goto MATCH; */ \ 212 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ 213 /* if (hi != arg.hi) goto NOMATCH; */ \ 214 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ 215 BPF_STMT(BPF_LD+BPF_MEM, 0), \ 216 /* if (lo >= arg.lo) goto MATCH; */ \ 217 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \ 218 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 219 jt, \ 220 BPF_STMT(BPF_LD+BPF_MEM, 1) 221 222 #define JGT64(lo, hi, jt) \ 223 /* if (hi > arg.hi) goto MATCH; */ \ 224 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ 225 /* if (hi != arg.hi) goto NOMATCH; */ \ 226 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ 227 BPF_STMT(BPF_LD+BPF_MEM, 0), \ 228 /* if (lo > arg.lo) goto MATCH; */ \ 229 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \ 230 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 231 jt, \ 232 BPF_STMT(BPF_LD+BPF_MEM, 1) 233 234 #define JLE64(lo, hi, jt) \ 235 /* if (hi < arg.hi) goto MATCH; */ \ 236 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ 237 /* if (hi != arg.hi) goto NOMATCH; */ \ 238 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ 239 BPF_STMT(BPF_LD+BPF_MEM, 0), \ 240 /* if (lo <= arg.lo) goto MATCH; */ \ 241 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ 242 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 243 jt, \ 244 BPF_STMT(BPF_LD+BPF_MEM, 1) 245 246 #define JLT64(lo, hi, jt) \ 247 /* if (hi < arg.hi) goto MATCH; */ \ 248 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ 249 /* if (hi != arg.hi) goto NOMATCH; */ \ 250 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ 251 BPF_STMT(BPF_LD+BPF_MEM, 0), \ 252 /* if (lo < arg.lo) goto MATCH; */ \ 253 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \ 254 BPF_STMT(BPF_LD+BPF_MEM, 1), \ 255 jt, \ 256 BPF_STMT(BPF_LD+BPF_MEM, 1) 257 258 #define LOAD_SYSCALL_NR \ 259 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \ 260 offsetof(struct seccomp_data, nr)) 261 262 #endif /* __BPF_HELPER_H__ */ 263