/*
 * BPF verifier test cases exercising unprivileged-mode restrictions:
 * pointer leaks, pointer arithmetic/comparison, spill/fill of pointers,
 * and frame-pointer protection.  (Code-browser navigation chrome from
 * the original web scrape removed.)
 */
1 {
2 	"unpriv: return pointer",
3 	.insns = {
4 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
5 	BPF_EXIT_INSN(),
6 	},
7 	.result = ACCEPT,
8 	.result_unpriv = REJECT,
9 	.errstr_unpriv = "R0 leaks addr",
10 	.retval = POINTER_VALUE,
11 },
12 {
13 	"unpriv: add const to pointer",
14 	.insns = {
15 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
16 	BPF_MOV64_IMM(BPF_REG_0, 0),
17 	BPF_EXIT_INSN(),
18 	},
19 	.result = ACCEPT,
20 },
21 {
22 	"unpriv: add pointer to pointer",
23 	.insns = {
24 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
25 	BPF_MOV64_IMM(BPF_REG_0, 0),
26 	BPF_EXIT_INSN(),
27 	},
28 	.result = REJECT,
29 	.errstr = "R1 pointer += pointer",
30 },
31 {
32 	"unpriv: neg pointer",
33 	.insns = {
34 	BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
35 	BPF_MOV64_IMM(BPF_REG_0, 0),
36 	BPF_EXIT_INSN(),
37 	},
38 	.result = ACCEPT,
39 	.result_unpriv = REJECT,
40 	.errstr_unpriv = "R1 pointer arithmetic",
41 },
42 {
43 	"unpriv: cmp pointer with const",
44 	.insns = {
45 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
46 	BPF_MOV64_IMM(BPF_REG_0, 0),
47 	BPF_EXIT_INSN(),
48 	},
49 	.result = ACCEPT,
50 	.result_unpriv = REJECT,
51 	.errstr_unpriv = "R1 pointer comparison",
52 },
53 {
54 	"unpriv: cmp pointer with pointer",
55 	.insns = {
56 	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
57 	BPF_MOV64_IMM(BPF_REG_0, 0),
58 	BPF_EXIT_INSN(),
59 	},
60 	.result = ACCEPT,
61 	.result_unpriv = REJECT,
62 	.errstr_unpriv = "R10 pointer comparison",
63 },
64 {
65 	"unpriv: check that printk is disallowed",
66 	.insns = {
67 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
68 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
69 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
70 	BPF_MOV64_IMM(BPF_REG_2, 8),
71 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
72 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
73 	BPF_MOV64_IMM(BPF_REG_0, 0),
74 	BPF_EXIT_INSN(),
75 	},
76 	.errstr_unpriv = "unknown func bpf_trace_printk#6",
77 	.result_unpriv = REJECT,
78 	.result = ACCEPT,
79 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
80 },
81 {
82 	"unpriv: pass pointer to helper function",
83 	.insns = {
84 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
85 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
86 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
87 	BPF_LD_MAP_FD(BPF_REG_1, 0),
88 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
89 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
90 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
91 	BPF_MOV64_IMM(BPF_REG_0, 0),
92 	BPF_EXIT_INSN(),
93 	},
94 	.fixup_map_hash_8b = { 3 },
95 	.errstr_unpriv = "R4 leaks addr",
96 	.result_unpriv = REJECT,
97 	.result = ACCEPT,
98 },
99 {
100 	"unpriv: indirectly pass pointer on stack to helper function",
101 	.insns = {
102 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
103 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
104 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
105 	BPF_LD_MAP_FD(BPF_REG_1, 0),
106 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
107 	BPF_MOV64_IMM(BPF_REG_0, 0),
108 	BPF_EXIT_INSN(),
109 	},
110 	.fixup_map_hash_8b = { 3 },
111 	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
112 	.result_unpriv = REJECT,
113 	.result = ACCEPT,
114 },
115 {
116 	"unpriv: mangle pointer on stack 1",
117 	.insns = {
118 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
119 	BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
120 	BPF_MOV64_IMM(BPF_REG_0, 0),
121 	BPF_EXIT_INSN(),
122 	},
123 	.errstr_unpriv = "attempt to corrupt spilled",
124 	.result_unpriv = REJECT,
125 	.result = ACCEPT,
126 },
127 {
128 	"unpriv: mangle pointer on stack 2",
129 	.insns = {
130 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
131 	BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
132 	BPF_MOV64_IMM(BPF_REG_0, 0),
133 	BPF_EXIT_INSN(),
134 	},
135 	.errstr_unpriv = "attempt to corrupt spilled",
136 	.result_unpriv = REJECT,
137 	.result = ACCEPT,
138 },
139 {
140 	"unpriv: read pointer from stack in small chunks",
141 	.insns = {
142 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
143 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
144 	BPF_MOV64_IMM(BPF_REG_0, 0),
145 	BPF_EXIT_INSN(),
146 	},
147 	.errstr = "invalid size",
148 	.result = REJECT,
149 },
150 {
151 	"unpriv: write pointer into ctx",
152 	.insns = {
153 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
154 	BPF_MOV64_IMM(BPF_REG_0, 0),
155 	BPF_EXIT_INSN(),
156 	},
157 	.errstr_unpriv = "R1 leaks addr",
158 	.result_unpriv = REJECT,
159 	.errstr = "invalid bpf_context access",
160 	.result = REJECT,
161 },
162 {
163 	"unpriv: spill/fill of ctx",
164 	.insns = {
165 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
166 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
167 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
168 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
169 	BPF_MOV64_IMM(BPF_REG_0, 0),
170 	BPF_EXIT_INSN(),
171 	},
172 	.result = ACCEPT,
173 },
174 {
175 	"unpriv: spill/fill of ctx 2",
176 	.insns = {
177 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
178 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
179 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
180 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
181 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
182 	BPF_MOV64_IMM(BPF_REG_0, 0),
183 	BPF_EXIT_INSN(),
184 	},
185 	.result = ACCEPT,
186 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
187 },
188 {
189 	"unpriv: spill/fill of ctx 3",
190 	.insns = {
191 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
192 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
193 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
194 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
195 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
196 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
197 	BPF_EXIT_INSN(),
198 	},
199 	.result = REJECT,
200 	.errstr = "R1 type=fp expected=ctx",
201 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
202 },
203 {
204 	"unpriv: spill/fill of ctx 4",
205 	.insns = {
206 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
207 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
208 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
209 	BPF_MOV64_IMM(BPF_REG_0, 1),
210 	BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
211 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
212 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
213 	BPF_EXIT_INSN(),
214 	},
215 	.result = REJECT,
216 	.errstr = "R1 type=inv expected=ctx",
217 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
218 },
219 {
220 	"unpriv: spill/fill of different pointers stx",
221 	.insns = {
222 	BPF_MOV64_IMM(BPF_REG_3, 42),
223 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
224 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
225 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
226 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
227 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
228 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
229 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
230 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
231 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
232 	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
233 		    offsetof(struct __sk_buff, mark)),
234 	BPF_MOV64_IMM(BPF_REG_0, 0),
235 	BPF_EXIT_INSN(),
236 	},
237 	.result = REJECT,
238 	.errstr = "same insn cannot be used with different pointers",
239 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
240 },
241 {
242 	"unpriv: spill/fill of different pointers stx - ctx and sock",
243 	.insns = {
244 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
245 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
246 	BPF_SK_LOOKUP(sk_lookup_tcp),
247 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
248 	/* u64 foo; */
249 	/* void *target = &foo; */
250 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
251 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
252 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
253 	/* if (skb == NULL) *target = sock; */
254 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
255 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
256 	/* else *target = skb; */
257 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
258 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
259 	/* struct __sk_buff *skb = *target; */
260 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
261 	/* skb->mark = 42; */
262 	BPF_MOV64_IMM(BPF_REG_3, 42),
263 	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
264 		    offsetof(struct __sk_buff, mark)),
265 	/* if (sk) bpf_sk_release(sk) */
266 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
267 		BPF_EMIT_CALL(BPF_FUNC_sk_release),
268 	BPF_MOV64_IMM(BPF_REG_0, 0),
269 	BPF_EXIT_INSN(),
270 	},
271 	.result = REJECT,
272 	.errstr = "type=ctx expected=sock",
273 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
274 },
275 {
276 	"unpriv: spill/fill of different pointers stx - leak sock",
277 	.insns = {
278 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
279 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
280 	BPF_SK_LOOKUP(sk_lookup_tcp),
281 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
282 	/* u64 foo; */
283 	/* void *target = &foo; */
284 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
285 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
286 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
287 	/* if (skb == NULL) *target = sock; */
288 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
289 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
290 	/* else *target = skb; */
291 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
292 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
293 	/* struct __sk_buff *skb = *target; */
294 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
295 	/* skb->mark = 42; */
296 	BPF_MOV64_IMM(BPF_REG_3, 42),
297 	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
298 		    offsetof(struct __sk_buff, mark)),
299 	BPF_EXIT_INSN(),
300 	},
301 	.result = REJECT,
302 	//.errstr = "same insn cannot be used with different pointers",
303 	.errstr = "Unreleased reference",
304 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
305 },
306 {
307 	"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
308 	.insns = {
309 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
310 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
311 	BPF_SK_LOOKUP(sk_lookup_tcp),
312 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
313 	/* u64 foo; */
314 	/* void *target = &foo; */
315 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
316 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
317 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
318 	/* if (skb) *target = skb */
319 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
320 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
321 	/* else *target = sock */
322 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
323 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
324 	/* struct bpf_sock *sk = *target; */
325 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
326 	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
327 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
328 		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
329 			    offsetof(struct bpf_sock, mark)),
330 		BPF_EMIT_CALL(BPF_FUNC_sk_release),
331 	BPF_MOV64_IMM(BPF_REG_0, 0),
332 	BPF_EXIT_INSN(),
333 	},
334 	.result = REJECT,
335 	.errstr = "same insn cannot be used with different pointers",
336 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
337 },
338 {
339 	"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
340 	.insns = {
341 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
342 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
343 	BPF_SK_LOOKUP(sk_lookup_tcp),
344 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
345 	/* u64 foo; */
346 	/* void *target = &foo; */
347 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
348 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
349 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
350 	/* if (skb) *target = skb */
351 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
352 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
353 	/* else *target = sock */
354 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
355 		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
356 	/* struct bpf_sock *sk = *target; */
357 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
358 	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
359 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
360 		BPF_MOV64_IMM(BPF_REG_3, 42),
361 		BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
362 			    offsetof(struct bpf_sock, mark)),
363 		BPF_EMIT_CALL(BPF_FUNC_sk_release),
364 	BPF_MOV64_IMM(BPF_REG_0, 0),
365 	BPF_EXIT_INSN(),
366 	},
367 	.result = REJECT,
368 	//.errstr = "same insn cannot be used with different pointers",
369 	.errstr = "cannot write into sock",
370 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
371 },
372 {
373 	"unpriv: spill/fill of different pointers ldx",
374 	.insns = {
375 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
376 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
377 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
378 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
379 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
380 		      -(__s32)offsetof(struct bpf_perf_event_data,
381 				       sample_period) - 8),
382 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
383 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
384 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
385 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
386 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
387 		    offsetof(struct bpf_perf_event_data, sample_period)),
388 	BPF_MOV64_IMM(BPF_REG_0, 0),
389 	BPF_EXIT_INSN(),
390 	},
391 	.result = REJECT,
392 	.errstr = "same insn cannot be used with different pointers",
393 	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
394 },
395 {
396 	"unpriv: write pointer into map elem value",
397 	.insns = {
398 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
399 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
400 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
401 	BPF_LD_MAP_FD(BPF_REG_1, 0),
402 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
403 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
404 	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
405 	BPF_EXIT_INSN(),
406 	},
407 	.fixup_map_hash_8b = { 3 },
408 	.errstr_unpriv = "R0 leaks addr",
409 	.result_unpriv = REJECT,
410 	.result = ACCEPT,
411 },
412 {
413 	"alu32: mov u32 const",
414 	.insns = {
415 	BPF_MOV32_IMM(BPF_REG_7, 0),
416 	BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
417 	BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
418 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
419 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
420 	BPF_EXIT_INSN(),
421 	},
422 	.errstr_unpriv = "R7 invalid mem access 'inv'",
423 	.result_unpriv = REJECT,
424 	.result = ACCEPT,
425 	.retval = 0,
426 },
427 {
428 	"unpriv: partial copy of pointer",
429 	.insns = {
430 	BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
431 	BPF_MOV64_IMM(BPF_REG_0, 0),
432 	BPF_EXIT_INSN(),
433 	},
434 	.errstr_unpriv = "R10 partial copy",
435 	.result_unpriv = REJECT,
436 	.result = ACCEPT,
437 },
438 {
439 	"unpriv: pass pointer to tail_call",
440 	.insns = {
441 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
442 	BPF_LD_MAP_FD(BPF_REG_2, 0),
443 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
444 	BPF_MOV64_IMM(BPF_REG_0, 0),
445 	BPF_EXIT_INSN(),
446 	},
447 	.fixup_prog1 = { 1 },
448 	.errstr_unpriv = "R3 leaks addr into helper",
449 	.result_unpriv = REJECT,
450 	.result = ACCEPT,
451 },
452 {
453 	"unpriv: cmp map pointer with zero",
454 	.insns = {
455 	BPF_MOV64_IMM(BPF_REG_1, 0),
456 	BPF_LD_MAP_FD(BPF_REG_1, 0),
457 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
458 	BPF_MOV64_IMM(BPF_REG_0, 0),
459 	BPF_EXIT_INSN(),
460 	},
461 	.fixup_map_hash_8b = { 1 },
462 	.errstr_unpriv = "R1 pointer comparison",
463 	.result_unpriv = REJECT,
464 	.result = ACCEPT,
465 },
466 {
467 	"unpriv: write into frame pointer",
468 	.insns = {
469 	BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
470 	BPF_MOV64_IMM(BPF_REG_0, 0),
471 	BPF_EXIT_INSN(),
472 	},
473 	.errstr = "frame pointer is read only",
474 	.result = REJECT,
475 },
476 {
477 	"unpriv: spill/fill frame pointer",
478 	.insns = {
479 	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
480 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
481 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
482 	BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
483 	BPF_MOV64_IMM(BPF_REG_0, 0),
484 	BPF_EXIT_INSN(),
485 	},
486 	.errstr = "frame pointer is read only",
487 	.result = REJECT,
488 },
489 {
490 	"unpriv: cmp of frame pointer",
491 	.insns = {
492 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
493 	BPF_MOV64_IMM(BPF_REG_0, 0),
494 	BPF_EXIT_INSN(),
495 	},
496 	.errstr_unpriv = "R10 pointer comparison",
497 	.result_unpriv = REJECT,
498 	.result = ACCEPT,
499 },
500 {
501 	"unpriv: adding of fp, reg",
502 	.insns = {
503 	BPF_MOV64_IMM(BPF_REG_0, 0),
504 	BPF_MOV64_IMM(BPF_REG_1, 0),
505 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
506 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
507 	BPF_EXIT_INSN(),
508 	},
509 	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
510 	.result_unpriv = REJECT,
511 	.result = ACCEPT,
512 },
513 {
514 	"unpriv: adding of fp, imm",
515 	.insns = {
516 	BPF_MOV64_IMM(BPF_REG_0, 0),
517 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
518 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
519 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
520 	BPF_EXIT_INSN(),
521 	},
522 	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
523 	.result_unpriv = REJECT,
524 	.result = ACCEPT,
525 },
526 {
527 	"unpriv: cmp of stack pointer",
528 	.insns = {
529 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
530 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
531 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
532 	BPF_MOV64_IMM(BPF_REG_0, 0),
533 	BPF_EXIT_INSN(),
534 	},
535 	.errstr_unpriv = "R2 pointer comparison",
536 	.result_unpriv = REJECT,
537 	.result = ACCEPT,
538 },
539