/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "bpf.h"
#include "util.h"

/* Architecture validation. */
size_t bpf_validate_arch(struct sock_filter *filter)
{
        struct sock_filter *curr_block = filter;
        set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, arch_nr);
        set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, ARCH_NR, SKIP,
                     NEXT);
        set_bpf_ret_kill(curr_block++);
        return curr_block - filter;
}
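
/*
 * For illustration, the three instructions emitted above decode roughly as:
 *   LD  arch_nr             ; A <- audit arch of the calling process
 *   JEQ ARCH_NR, SKIP, NEXT ; the expected arch jumps past the kill
 *   RET kill                ; any other arch is killed outright
 */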

/* Syscall number eval functions. */
size_t bpf_allow_syscall(struct sock_filter *filter, int nr)
{
        struct sock_filter *curr_block = filter;
        set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP);
        set_bpf_stmt(curr_block++, BPF_RET + BPF_K, SECCOMP_RET_ALLOW);
        return curr_block - filter;
}
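
/*
 * Usage sketch (hypothetical caller, assuming the syscall number has
 * already been loaded into A):
 *   curr_block += bpf_allow_syscall(curr_block, __NR_read);
 * This emits a JEQ that falls through to RET SECCOMP_RET_ALLOW on a match
 * and skips past the allow otherwise.
 */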

size_t bpf_allow_syscall_args(struct sock_filter *filter, int nr,
                              unsigned int id)
{
        struct sock_filter *curr_block = filter;
        set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP);
        set_bpf_jump_lbl(curr_block++, id);
        return curr_block - filter;
}

/* Size-aware arg loaders. */
#if defined(BITS32)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
        set_bpf_stmt(filter, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx));
        return 1U;
}
#elif defined(BITS64)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
        struct sock_filter *curr_block = filter;
        set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx));
        set_bpf_stmt(curr_block++, BPF_ST, 0); /* lo -> M[0] */
        set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, HI_ARG(argidx));
        set_bpf_stmt(curr_block++, BPF_ST, 1); /* hi -> M[1] */
        return curr_block - filter;
}
#endif

/* Size-aware equality comparison. */
size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
                      unsigned char jt, unsigned char jf)
{
        unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
        set_bpf_jump(filter, BPF_JMP + BPF_JEQ + BPF_K, lo, jt, jf);
        return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit comparisons.
 * We jump true when *both* comparisons are true.
 */
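/*
 * For illustration, comparing the argument against c == 0x100000002ULL
 * expands roughly to (with |hi| already in A after bpf_load_arg):
 *   JEQ 0x1, NEXT, SKIPN(2)+jf  ; high words differ -> jump false
 *   LD  M[0]                    ; A <- |lo|
 *   JEQ 0x2, jt, jf             ; low words decide the final outcome
 */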
#if defined(BITS64)
size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c, unsigned char jt,
                      unsigned char jf)
{
        unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
        unsigned int hi = (unsigned int)(c >> 32);

        struct sock_filter *curr_block = filter;

        /* bpf_load_arg leaves |hi| in A */
        curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
        set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
        curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);

        return curr_block - filter;
}
#endif

/* Size-aware bitwise AND. */
size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask,
                       unsigned char jt, unsigned char jf)
{
        unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
        set_bpf_jump(filter, BPF_JMP + BPF_JSET + BPF_K, mask_lo, jt, jf);
        return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit bitwise ANDs.
 * We jump true when *either* bitwise AND is true (non-zero).
 */
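/*
 * For illustration, testing mask == 0x400000001ULL expands roughly to
 * (with |hi| already in A after bpf_load_arg):
 *   JSET 0x4, SKIPN(2)+jt, NEXT ; a high bit is set -> jump true early
 *   LD   M[0]                   ; A <- |lo|
 *   JSET 0x1, jt, jf            ; low bits decide the final outcome
 */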
#if defined(BITS64)
size_t bpf_comp_jset64(struct sock_filter *filter, uint64_t mask,
                       unsigned char jt, unsigned char jf)
{
        unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
        unsigned int mask_hi = (unsigned int)(mask >> 32);

        struct sock_filter *curr_block = filter;

        /* bpf_load_arg leaves |hi| in A */
        curr_block += bpf_comp_jset32(curr_block, mask_hi, SKIPN(2) + jt, NEXT);
        set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
        curr_block += bpf_comp_jset32(curr_block, mask_lo, jt, jf);

        return curr_block - filter;
}
#endif

size_t bpf_comp_jin(struct sock_filter *filter, unsigned long mask,
                    unsigned char jt, unsigned char jf)
{
        unsigned long negative_mask = ~mask;
        /*
         * The mask is negated, so the comparison will be true when the
         * argument includes a flag that wasn't listed in the original
         * (non-negated) mask. This would be the failure case, so we switch
         * |jt| and |jf|.
         */
        return bpf_comp_jset(filter, negative_mask, jf, jt);
}
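
/*
 * Worked example: with mask == 0b0110, ~mask has every bit *outside* the
 * allowed set. An argument of 0b1000 makes the JSET against ~mask true
 * (a disallowed bit is present), which, thanks to the swapped |jt|/|jf|,
 * takes the false branch. An argument of 0b0100 stays within the mask and
 * takes the true branch.
 */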

size_t bpf_arg_comp(struct sock_filter **pfilter, int op, int argidx,
                    unsigned long c, unsigned int label_id)
{
        struct sock_filter *filter =
            calloc(BPF_ARG_COMP_LEN + 1, sizeof(struct sock_filter));
        struct sock_filter *curr_block = filter;
        size_t (*comp_function)(struct sock_filter *filter, unsigned long k,
                                unsigned char jt, unsigned char jf);
        int flip = 0;

        if (!filter) {
                *pfilter = NULL;
                return 0;
        }

        /* Load arg */
        curr_block += bpf_load_arg(curr_block, argidx);

        /* Jump type */
        switch (op) {
        case EQ:
                comp_function = bpf_comp_jeq;
                flip = 0;
                break;
        case NE:
                comp_function = bpf_comp_jeq;
                flip = 1;
                break;
        case SET:
                comp_function = bpf_comp_jset;
                flip = 0;
                break;
        case IN:
                comp_function = bpf_comp_jin;
                flip = 0;
                break;
        default:
                free(filter);
                *pfilter = NULL;
                return 0;
        }

        /*
         * It's easier for the rest of the code to have the true branch
         * skip and the false branch fall through.
         */
        unsigned char jt = flip ? NEXT : SKIP;
        unsigned char jf = flip ? SKIP : NEXT;
        curr_block += comp_function(curr_block, c, jt, jf);
        curr_block += set_bpf_jump_lbl(curr_block, label_id);

        *pfilter = filter;
        return curr_block - filter;
}
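
/*
 * Usage sketch (hypothetical caller): build the comparison for "arg0 == 5",
 * append it to the program, then free the scratch block.
 *   struct sock_filter *comp;
 *   size_t len = bpf_arg_comp(&comp, EQ, 0, 5, label_id);
 *   if (!comp)
 *           return -1;
 *   memcpy(dest, comp, len * sizeof(*comp));
 *   free(comp);
 */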

void dump_bpf_filter(struct sock_filter *filter, unsigned short len)
{
        int i = 0;

        printf("len == %d\n", len);
        printf("filter:\n");
        for (i = 0; i < len; i++) {
                printf("%d: \t{ code=%#x, jt=%u, jf=%u, k=%#x \t}\n", i,
                       filter[i].code, filter[i].jt, filter[i].jf,
                       filter[i].k);
        }
}

void dump_bpf_prog(struct sock_fprog *fprog)
{
        struct sock_filter *filter = fprog->filter;
        unsigned short len = fprog->len;
        dump_bpf_filter(filter, len);
}

int bpf_resolve_jumps(struct bpf_labels *labels, struct sock_filter *filter,
                      size_t len)
{
        struct sock_filter *instr;
        size_t i, offset;

        if (len > BPF_MAXINSNS)
                return -1;

        /*
         * Walk it once, backwards, to build the label table and do fixups.
         * Since backward jumps are disallowed by BPF, this is easy.
         */
        for (i = 0; i < len; i++) {
                offset = len - i - 1;
                instr = &filter[offset];
                if (instr->code != (BPF_JMP + BPF_JA))
                        continue;
                switch ((instr->jt << 8) | instr->jf) {
                case (JUMP_JT << 8) | JUMP_JF:
                        if (instr->k >= labels->count) {
                                warn("nonexistent label id: %u", instr->k);
                                return -1;
                        }
                        if (labels->labels[instr->k].location == 0xffffffff) {
                                warn("unresolved label: '%s'",
                                     labels->labels[instr->k].label);
                                return -1;
                        }
                        instr->k =
                            labels->labels[instr->k].location - (offset + 1);
                        instr->jt = 0;
                        instr->jf = 0;
                        continue;
                case (LABEL_JT << 8) | LABEL_JF:
                        if (labels->labels[instr->k].location != 0xffffffff) {
                                warn("duplicate label: '%s'",
                                     labels->labels[instr->k].label);
                                return -1;
                        }
                        labels->labels[instr->k].location = offset;
                        instr->k = 0; /* Fall through. */
                        instr->jt = 0;
                        instr->jf = 0;
                        continue;
                }
        }
        return 0;
}
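
/*
 * For illustration: a JA instruction at offset 5 carrying label id 3, whose
 * label turns out to live at location 9, is rewritten to a plain jump with
 * k = 9 - (5 + 1) = 3, i.e. skip the next three instructions.
 */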

/* Simple lookup table for labels. */
int bpf_label_id(struct bpf_labels *labels, const char *label)
{
        struct __bpf_label *begin = labels->labels, *end;
        int id;
        if (labels->count == 0) {
                begin->label = strndup(label, MAX_BPF_LABEL_LEN);
                if (!begin->label) {
                        return -1;
                }
                begin->location = 0xffffffff;
                labels->count++;
                return 0;
        }
        end = begin + labels->count;
        for (id = 0; begin < end; ++begin, ++id) {
                if (!strcmp(label, begin->label)) {
                        return id;
                }
        }

        /* The label wasn't found. Insert it only if there's space. */
        if (labels->count == BPF_LABELS_MAX) {
                return -1;
        }
        begin->label = strndup(label, MAX_BPF_LABEL_LEN);
        if (!begin->label) {
                return -1;
        }
        begin->location = 0xffffffff;
        labels->count++;
        return id;
}
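
/*
 * Usage sketch (hypothetical label name): the first call for a given string
 * allocates a slot and returns its id; later calls with the same string
 * return the same id without allocating.
 *   int id = bpf_label_id(&labels, "deny_open");   // e.g. 0 on empty table
 *   int same = bpf_label_id(&labels, "deny_open"); // still 0
 */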

void free_label_strings(struct bpf_labels *labels)
{
        if (labels->count == 0)
                return;

        struct __bpf_label *begin = labels->labels, *end;

        end = begin + labels->count;
        for (; begin < end; ++begin) {
                if (begin->label)
                        free((void *)(begin->label));
        }

        labels->count = 0;
}