{
	"unpriv: return pointer",
	.insns = {
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr",
	.retval = POINTER_VALUE,
},
{
	"unpriv: add const to pointer",
	.insns = {
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"unpriv: add pointer to pointer",
	.insns = {
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R1 pointer += pointer",
},
{
	"unpriv: neg pointer",
	.insns = {
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R1 pointer arithmetic",
},
{
	"unpriv: cmp pointer with const",
	.insns = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R1 pointer comparison",
},
{
	"unpriv: cmp pointer with pointer",
	.insns = {
	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R10 pointer comparison",
},
{
	"unpriv: check that printk is disallowed",
	.insns = {
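	/* Store 0 at fp-8, point R1 at that buffer with R2 = 8 and call
	 * bpf_trace_printk(); the helper is not available to unprivileged
	 * programs.
	 */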
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "unknown func bpf_trace_printk#6",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"unpriv: pass pointer to helper function",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr_unpriv = "R4 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: indirectly pass pointer on stack to helper function",
	.insns = {
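	/* Spill the frame pointer to fp-8 and use that slot as the map
	 * lookup key; reading back a spilled pointer as helper data is an
	 * invalid indirect read.
	 */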
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "invalid indirect read from stack off -8+0 size 8",
	.result = REJECT,
},
{
	"unpriv: mangle pointer on stack 1",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: mangle pointer on stack 2",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: read pointer from stack in small chunks",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid size",
	.result = REJECT,
},
{
	"unpriv: write pointer into ctx",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"unpriv: spill/fill of ctx",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"unpriv: spill/fill of ctx 2",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of ctx 3",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R1 type=fp expected=ctx",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of ctx 4",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
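	/* Atomic add into the spilled ctx slot turns it into a scalar, so
	 * the fill below no longer yields a ctx pointer.
	 */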
	BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R1 type=inv expected=ctx",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of different pointers stx",
	.insns = {
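	/* Depending on R1, spill either a stack pointer or the ctx pointer
	 * into the same slot; the single store through the filled register
	 * then has to work with two different pointer types.
	 */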
	BPF_MOV64_IMM(BPF_REG_3, 42),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
		    offsetof(struct __sk_buff, mark)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "same insn cannot be used with different pointers",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of different pointers stx - ctx and sock",
	.insns = {
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	/* u64 foo; */
	/* void *target = &foo; */
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	/* if (skb == NULL) *target = sock; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
	/* else *target = skb; */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	/* struct __sk_buff *skb = *target; */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	/* skb->mark = 42; */
	BPF_MOV64_IMM(BPF_REG_3, 42),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
		    offsetof(struct __sk_buff, mark)),
	/* if (sk) bpf_sk_release(sk) */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "type=ctx expected=sock",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of different pointers stx - leak sock",
	.insns = {
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	/* u64 foo; */
	/* void *target = &foo; */
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	/* if (skb == NULL) *target = sock; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
	/* else *target = skb; */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	/* struct __sk_buff *skb = *target; */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	/* skb->mark = 42; */
	BPF_MOV64_IMM(BPF_REG_3, 42),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	//.errstr = "same insn cannot be used with different pointers",
	.errstr = "Unreleased reference",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	/* u64 foo; */
	/* void *target = &foo; */
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	/* if (skb) *target = skb */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	/* else *target = sock */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
	/* struct bpf_sock *sk = *target; */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct bpf_sock, mark)),
		BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "same insn cannot be used with different pointers",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	/* u64 foo; */
	/* void *target = &foo; */
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	/* if (skb) *target = skb */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	/* else *target = sock */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
	/* struct bpf_sock *sk = *target; */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
		BPF_MOV64_IMM(BPF_REG_3, 42),
		BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
			    offsetof(struct bpf_sock, mark)),
		BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	//.errstr = "same insn cannot be used with different pointers",
	.errstr = "cannot write into sock",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"unpriv: spill/fill of different pointers ldx",
	.insns = {
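	/* Same idea as the stx variant above: one slot is filled with either
	 * a stack pointer or the ctx pointer, and a single load insn then
	 * dereferences both types.
	 */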
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
		      -(__s32)offsetof(struct bpf_perf_event_data,
				       sample_period) - 8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "same insn cannot be used with different pointers",
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"unpriv: write pointer into map elem value",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"alu32: mov u32 const",
	.insns = {
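	/* R7 is known to be zero after the AND, so the branch skips the
	 * load through R7 and the program returns 0.
	 */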
	BPF_MOV32_IMM(BPF_REG_7, 0),
	BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
	BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"unpriv: partial copy of pointer",
	.insns = {
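	/* A 32-bit mov truncates the frame pointer; unprivileged programs
	 * may not copy part of a pointer.
	 */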
	BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R10 partial copy",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: pass pointer to tail_call",
	.insns = {
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.errstr_unpriv = "R3 leaks addr into helper",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: cmp map pointer with zero",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: write into frame pointer",
	.insns = {
	BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "frame pointer is read only",
	.result = REJECT,
},
{
	"unpriv: spill/fill frame pointer",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "frame pointer is read only",
	.result = REJECT,
},
{
	"unpriv: cmp of frame pointer",
	.insns = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R10 pointer comparison",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: adding of fp",
	.insns = {
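	/* R1 = 0 + fp: for unprivileged programs this pointer arithmetic is
	 * rejected as out of range, while root may perform the store to fp-8.
	 */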
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"unpriv: cmp of stack pointer",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R2 pointer comparison",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},