/* bpf.h
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Berkeley Packet Filter functions.
 */

#ifndef BPF_H
#define BPF_H

#include <asm/bitsperlong.h> /* for __BITS_PER_LONG */
#include <endian.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <stddef.h>
#include <sys/user.h>

#include "arch.h"

#if __BITS_PER_LONG == 32 || defined(__ILP32__)
#define BITS32
#elif __BITS_PER_LONG == 64
#define BITS64
#endif

/* Constants for comparison operators. */
#define MIN_OPERATOR 128
enum operator {
        EQ = MIN_OPERATOR,
        NE,
        LT,
        LE,
        GT,
        GE,
        SET
};

/*
 * BPF return values and data structures,
 * since they're not yet available in the kernel headers.
 */
#define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
#define SECCOMP_RET_TRAP 0x00030000U /* return SIGSYS */
#define SECCOMP_RET_ERRNO 0x00050000U /* return -1 and set errno */
#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */

#define SECCOMP_RET_DATA 0x0000ffffU /* mask for return value */

struct seccomp_data {
        int nr;
        __u32 arch;
        __u64 instruction_pointer;
        __u64 args[6];
};

#define syscall_nr (offsetof(struct seccomp_data, nr))
#define arch_nr (offsetof(struct seccomp_data, arch))
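
/*
 * Example (illustrative sketch, not part of this header's API): syscall_nr
 * and arch_nr are byte offsets into struct seccomp_data, intended as the
 * 'k' operand of an absolute load, e.g.:
 *
 *   struct sock_filter load_nr;
 *   set_bpf_instr(&load_nr, BPF_LD+BPF_W+BPF_ABS, syscall_nr, 0, 0);
 *
 * which loads the current syscall number into the BPF accumulator
 * (set_bpf_instr() and the bpf_load_syscall_nr() shorthand are defined
 * further down in this file).
 */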

/* Size-dependent defines. */
#if defined(BITS32)
/*
 * On 32 bits, comparisons take 2 instructions: 1 for loading the argument,
 * 1 for the actual comparison.
 */
#define BPF_LOAD_ARG_LEN 1U
#define BPF_COMP_LEN 1U
#define BPF_ARG_COMP_LEN (BPF_LOAD_ARG_LEN + BPF_COMP_LEN)

#define bpf_comp_jeq bpf_comp_jeq32
#define bpf_comp_jset bpf_comp_jset32

#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])

#elif defined(BITS64)
/*
 * On 64 bits, comparisons take 7 instructions: 4 for loading the argument,
 * and 3 for the actual comparison.
 */
#define BPF_LOAD_ARG_LEN 4U
#define BPF_COMP_LEN 3U
#define BPF_ARG_COMP_LEN (BPF_LOAD_ARG_LEN + BPF_COMP_LEN)

#define bpf_comp_jeq bpf_comp_jeq64
#define bpf_comp_jset bpf_comp_jset64

/* Ensure that we load the logically correct offset. */
#if defined(__LITTLE_ENDIAN__) || __BYTE_ORDER == __LITTLE_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#define HI_ARG(idx) (offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32))
#else
#error "Unsupported endianness"
#endif

#else
#error "Unknown bit width"

#endif
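
/*
 * Example (illustrative sketch): the lengths above size the buffers for a
 * single argument comparison.  A 64-bit equality check against a constant
 * could be emitted with the helpers declared below as roughly:
 *
 *   struct sock_filter comp[BPF_ARG_COMP_LEN];
 *   size_t len = bpf_load_arg(comp, 0);        // 4 instructions on 64-bit
 *   len += bpf_comp_jeq(comp + len, 0, 1, 0);  // 3 instructions on 64-bit
 *
 * where LO_ARG()/HI_ARG() give the offsets of the low and high 32-bit
 * halves of args[idx], which the load sequence fetches separately.
 */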

/* Common jump targets. */
#define NEXT 0
#define SKIP 1
#define SKIPN(_n) (_n)
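
/*
 * Example (illustrative sketch): NEXT and SKIP are meant as the jt/jf
 * operands of conditional jumps, e.g. "if the accumulator equals 42, skip
 * the next instruction, otherwise fall through":
 *
 *   struct sock_filter jump;
 *   set_bpf_jump(&jump, BPF_JMP+BPF_JEQ+BPF_K, 42, SKIP, NEXT);
 */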

/* Support for labels in BPF programs. */
#define JUMP_JT 0xff
#define JUMP_JF 0xff
#define LABEL_JT 0xfe
#define LABEL_JF 0xfe

#define MAX_BPF_LABEL_LEN 32

#define BPF_LABELS_MAX 256
struct bpf_labels {
        int count;
        struct __bpf_label {
                const char *label;
                unsigned int location;
        } labels[BPF_LABELS_MAX];
};

/* BPF instruction manipulation functions and macros. */
static inline size_t set_bpf_instr(struct sock_filter *instr,
                                   unsigned short code, unsigned int k,
                                   unsigned char jt, unsigned char jf)
{
        instr->code = code;
        instr->k = k;
        instr->jt = jt;
        instr->jf = jf;
        return 1U;
}
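
/*
 * Example (illustrative sketch): the return value (always 1) lets callers
 * keep a running instruction count while filling a filter buffer, e.g.:
 *
 *   struct sock_filter prog[2];
 *   size_t len = 0;
 *   len += set_bpf_instr(&prog[len], BPF_LD+BPF_W+BPF_ABS, syscall_nr, 0, 0);
 *   len += set_bpf_instr(&prog[len], BPF_RET+BPF_K, SECCOMP_RET_ALLOW, 0, 0);
 */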

#define set_bpf_stmt(_block, _code, _k) \
        set_bpf_instr((_block), (_code), (_k), 0, 0)

#define set_bpf_jump(_block, _code, _k, _jt, _jf) \
        set_bpf_instr((_block), (_code), (_k), (_jt), (_jf))

#define set_bpf_lbl(_block, _lbl_id) \
        set_bpf_jump((_block), BPF_JMP+BPF_JA, (_lbl_id), \
                     LABEL_JT, LABEL_JF)

#define set_bpf_jump_lbl(_block, _lbl_id) \
        set_bpf_jump((_block), BPF_JMP+BPF_JA, (_lbl_id), \
                     JUMP_JT, JUMP_JF)

#define set_bpf_ret_kill(_block) \
        set_bpf_stmt((_block), BPF_RET+BPF_K, SECCOMP_RET_KILL)

#define set_bpf_ret_trap(_block) \
        set_bpf_stmt((_block), BPF_RET+BPF_K, SECCOMP_RET_TRAP)

#define set_bpf_ret_errno(_block, _errno) \
        set_bpf_stmt((_block), BPF_RET+BPF_K, \
                     SECCOMP_RET_ERRNO | ((_errno) & SECCOMP_RET_DATA))

#define set_bpf_ret_allow(_block) \
        set_bpf_stmt((_block), BPF_RET+BPF_K, SECCOMP_RET_ALLOW)

#define bpf_load_syscall_nr(_filter) \
        set_bpf_stmt((_filter), BPF_LD+BPF_W+BPF_ABS, syscall_nr)
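
/*
 * Example (illustrative sketch, assuming __NR_getpid is in scope via
 * <sys/syscall.h>): a minimal "allow getpid, kill everything else" body
 * built from the macros above:
 *
 *   struct sock_filter prog[4];
 *   size_t len = 0;
 *   len += bpf_load_syscall_nr(&prog[len]);
 *   len += set_bpf_jump(&prog[len], BPF_JMP+BPF_JEQ+BPF_K, __NR_getpid,
 *                       NEXT, SKIP);
 *   len += set_bpf_ret_allow(&prog[len]);
 *   len += set_bpf_ret_kill(&prog[len]);
 *
 * A real filter would also validate the architecture first (see
 * bpf_validate_arch() below).
 */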

/* BPF label functions. */
int bpf_resolve_jumps(struct bpf_labels *labels,
                      struct sock_filter *filter, size_t count);
int bpf_label_id(struct bpf_labels *labels, const char *label);
void free_label_strings(struct bpf_labels *labels);
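
/*
 * Example (illustrative sketch; 'filter' and 'len' stand for a caller-owned
 * buffer and running instruction count): jumps to named labels are emitted
 * with the sentinel jt/jf values above and patched in a final pass:
 *
 *   struct bpf_labels labels = { .count = 0 };
 *   int id = bpf_label_id(&labels, "deny");
 *
 *   len += set_bpf_jump_lbl(&filter[len], id);   // jump to "deny"
 *   ...
 *   len += set_bpf_lbl(&filter[len], id);        // "deny" lands here
 *   len += set_bpf_ret_kill(&filter[len]);
 *
 *   bpf_resolve_jumps(&labels, filter, len);     // fix up the label jumps
 *   free_label_strings(&labels);
 */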

/* BPF helper functions. */
size_t bpf_load_arg(struct sock_filter *filter, int argidx);
size_t bpf_comp_jeq(struct sock_filter *filter, unsigned long c,
                    unsigned char jt, unsigned char jf);
size_t bpf_comp_jset(struct sock_filter *filter, unsigned long mask,
                     unsigned char jt, unsigned char jf);

/* Functions called by syscall_filter.c */
#define ARCH_VALIDATION_LEN 3U
#define ALLOW_SYSCALL_LEN 2U

size_t bpf_arg_comp(struct sock_filter **pfilter,
                    int op, int argidx, unsigned long c, unsigned int label_id);
size_t bpf_validate_arch(struct sock_filter *filter);
size_t bpf_allow_syscall(struct sock_filter *filter, int nr);
size_t bpf_allow_syscall_args(struct sock_filter *filter,
                              int nr, unsigned int id);
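
/*
 * Example (illustrative sketch, assuming __NR_exit_group is in scope via
 * <sys/syscall.h>): ARCH_VALIDATION_LEN and ALLOW_SYSCALL_LEN size the
 * blocks these functions emit:
 *
 *   struct sock_filter head[ARCH_VALIDATION_LEN + ALLOW_SYSCALL_LEN];
 *   size_t len = bpf_validate_arch(head);
 *   len += bpf_allow_syscall(head + len, __NR_exit_group);
 *
 * For argument filtering, bpf_arg_comp() returns a comparison block through
 * its double-pointer argument; here a hypothetical check of arg0 against 0
 * using the EQ operator from the enum at the top of this file, wired to the
 * label 'label_id':
 *
 *   struct sock_filter *comp;
 *   size_t comp_len = bpf_arg_comp(&comp, EQ, 0, 0, label_id);
 */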

/* Debug functions. */
void dump_bpf_prog(struct sock_fprog *fprog);
void dump_bpf_filter(struct sock_filter *filter, unsigned short len);

#endif /* BPF_H */