/*
 * NOTE(review): the original paste began with code-viewer navigation
 * chrome ("Home / Line# / Scopes# / Navigate / Raw / Download") left
 * over from HTML extraction; replaced with this comment so the file
 * stays valid C. Content below is a BPF verifier selftest table
 * (struct bpf_test initializers) — presumably a chunk of
 * tools/testing/selftests/bpf/verifier/; confirm against the kernel tree.
 */
{
	"pointer/scalar confusion in state equality check (way 1)",
	.insns = {
	/* Zero an 8-byte stack slot and build a lookup key pointer in r2 = fp-8. */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),	/* map fd patched in by fixup_map_hash_8b (insn 3) */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* if lookup returned NULL, skip the load and the jump below */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* non-NULL path: r0 = *(u64 *)map_value — r0 becomes a scalar */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_JMP_A(1),
	/* NULL path: r0 = fp — r0 becomes a pointer */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	/* no-op jump where both paths converge; state pruning must not
	 * treat the scalar-r0 and pointer-r0 states as equivalent here
	 */
	BPF_JMP_A(0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },	/* insn index of BPF_LD_MAP_FD to patch */
	.result = ACCEPT,		/* privileged: pointer return is allowed */
	.retval = POINTER_VALUE,
	.result_unpriv = REJECT,	/* unprivileged: pointer leak via r0 must be caught */
	.errstr_unpriv = "R0 leaks addr as return value"
},
{
	"pointer/scalar confusion in state equality check (way 2)",
	.insns = {
	/* Zero an 8-byte stack slot and build a lookup key pointer in r2 = fp-8. */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),	/* map fd patched in by fixup_map_hash_8b (insn 3) */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* mirror of "way 1" with the branch sense inverted:
	 * if lookup succeeded, skip the pointer-assignment path
	 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* NULL path: r0 = fp — r0 becomes a pointer */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_JMP_A(1),
	/* non-NULL path: r0 = *(u64 *)map_value — r0 becomes a scalar */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },	/* insn index of BPF_LD_MAP_FD to patch */
	.result = ACCEPT,		/* privileged: pointer return is allowed */
	.retval = POINTER_VALUE,
	.result_unpriv = REJECT,	/* unprivileged: pointer leak via r0 must be caught */
	.errstr_unpriv = "R0 leaks addr as return value"
},
{
	"liveness pruning and write screening",
	.insns = {
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* branch conditions teach us nothing about R2 */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),	/* unsigned >= 0: taken path skips the r0 write */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),	/* second identical branch: another path skipping r0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	/* a path exists on which r0 was never written before exit */
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 !read_ok",	/* verifier must notice the uninitialized-r0 path */
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
	"varlen_map_value_access pruning",
	.insns = {
	/* Zero an 8-byte stack slot and build a lookup key pointer in r2 = fp-8. */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),	/* map fd patched in by fixup_map_hash_48b (insn 3) */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPFF_REG_0 == BPF_REG_0 ? BPF_REG_0 : BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.errstr = "R0 unbounded memory access",
	.result_unpriv = REJECT,
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"search pruning: all branches should be verified (nop operation)",
	.insns = {
		/* Zero an 8-byte stack slot and build a lookup key pointer r2 = fp-8. */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),	/* map fd patched in by fixup_map_hash_8b (insn 3) */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* NULL lookup result: skip 11 insns straight to exit */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
		/* r4 = (map value == 0xbeef) ? 1 : 0, via diamond branches */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_A(1),
		BPF_MOV64_IMM(BPF_REG_4, 1),
		/* spill r4 to fp-16; helper call in between clobbers r0-r5 */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
		BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
		/* r5 == 0 (the r4=0 branch): skip to exit.
		 * r5 != 0 (the r4=1 branch): store through scalar r6=0 — invalid.
		 * The verifier must verify this branch rather than prune it away.
		 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
		BPF_MOV64_IMM(BPF_REG_6, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
		BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },	/* insn index of BPF_LD_MAP_FD to patch */
	.errstr = "R6 invalid mem access 'inv'",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"search pruning: all branches should be verified (invalid stack access)",
	.insns = {
		/* Zero an 8-byte stack slot and build a lookup key pointer r2 = fp-8. */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),	/* map fd patched in by fixup_map_hash_8b (insn 3) */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* NULL lookup result: skip 8 insns straight to exit */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		/* value == 0xbeef: spill to fp-24; otherwise spill to fp-16 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
		BPF_JMP_A(1),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
		/* read fp-16 unconditionally: on the fp-24 branch this slot
		 * was never written, so the verifier must reject that path
		 * instead of pruning it
		 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
		BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },	/* insn index of BPF_LD_MAP_FD to patch */
	.errstr = "invalid read from stack off -16+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"allocated_stack",
	.insns = {
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),	/* save ctx in callee-saved r6 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
		/* random r0 == 0: skip the spill/fill block below */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		/* 8-byte spill/fill of r6 at fp-8 and 1-byte spill/fill of r7 at fp-9 */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
		BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
		/* four no-op conditional jumps (offset 0): convergence points
		 * where the two paths (with and without the spills) should prune
		 */
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
	/* expected instruction count — checks that pruning kicks in on the
	 * second path; presumably fails if stack-state tracking regresses
	 * (TODO confirm exact mechanism against test_verifier.c)
	 */
	.insn_processed = 15,
},
157 /* The test performs a conditional 64-bit write to a stack location
158  * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
159  * then data is read from fp[-8]. This sequence is unsafe.
160  *
161  * The test would be mistakenly marked as safe w/o dst register parent
162  * preservation in verifier.c:copy_register_state() function.
163  *
164  * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
165  * checkpoint state after conditional 64-bit assignment.
166  */
{
	"write tracking and register parent chain bug",
	.insns = {
	/* r6 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* r0 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	/* if r0 > r6 goto +1 — makes the 64-bit store below conditional */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
	/* *(u64 *)(r10 - 8) = 0xdeadbeef */
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
	/* r1 = 42 */
	BPF_MOV64_IMM(BPF_REG_1, 42),
	/* *(u8 *)(r10 - 8) = r1 — unconditional, but only 1 byte wide */
	BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
	/* r2 = *(u64 *)(r10 - 8) — bytes -8+1..-8+7 may be uninitialized */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
	/* exit(0) */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.flags = BPF_F_TEST_STATE_FREQ,	/* force frequent checkpoint states */
	.errstr = "invalid read from stack off -8+1 size 8",
	.result = REJECT,
},
193