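/* In-bounds index 0: the tail call is taken and the called program's
 * return value (42) is observed; the fall-through "return 1" never runs.
 */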
{
	"runtime/jit: tail_call within bounds, prog once",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 42,
},
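/* In-bounds index 1: the target program exercises a tail call loop and
 * eventually returns 41.
 */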
{
	"runtime/jit: tail_call within bounds, prog loop",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 41,
},
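/* Index 3 is within the map bounds but has no program installed, so
 * bpf_tail_call() falls through and the caller returns 1.
 */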
{
	"runtime/jit: tail_call within bounds, no prog",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 1,
},
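/* In-bounds index 2: the tail call is taken and the test expects 24. */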
{
	"runtime/jit: tail_call within bounds, key 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 24,
},
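/* Both branches load the same prog array and the same index (2) before the
 * shared tail call; storing 13 in cb[0] takes the BPF_JEQ path. Either way
 * the expected return value is 24.
 */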
{
	"runtime/jit: tail_call within bounds, key 2 / key 2, first branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 13),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 24,
},
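/* Same as above, but cb[0] = 14 drives execution down the other branch. */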
{
	"runtime/jit: tail_call within bounds, key 2 / key 2, second branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 14),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 24,
},
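/* The two branches load the same map but different indices (0 and 2);
 * cb[0] = 13 takes the path that tail calls with index 2 (retval 24).
 */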
{
	"runtime/jit: tail_call within bounds, key 0 / key 2, first branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 13),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 24,
},
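/* Same layout as above, but cb[0] = 14 selects the index-0 path, so the
 * program at index 0 runs and returns 42.
 */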
{
	"runtime/jit: tail_call within bounds, key 0 / key 2, second branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 14),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 42,
},
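/* The two branches load pointers to two different prog arrays. Unprivileged
 * loads are rejected ("tail_call abusing map_ptr") since the verifier cannot
 * track a single map pointer into the tail call; in the privileged run the
 * selected branch's tail call does not connect, so the fall-through value 1
 * is returned.
 */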
{
	"runtime/jit: tail_call within bounds, different maps, first branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 13),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5 },
	.fixup_prog2 = { 9 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "tail_call abusing map_ptr",
	.result = ACCEPT,
	.retval = 1,
},
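/* Same as above with cb[0] = 14: the other branch runs and its tail call
 * lands on a program returning 42.
 */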
{
	"runtime/jit: tail_call within bounds, different maps, second branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 14),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5 },
	.fixup_prog2 = { 9 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "tail_call abusing map_ptr",
	.result = ACCEPT,
	.retval = 42,
},
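/* Index 256 is beyond the map's max_entries, so bpf_tail_call() fails at
 * run time and the program falls through to return 2.
 */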
{
	"runtime/jit: tail_call out of bounds",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 256),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 2,
},
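/* The index is a u32, so -1 becomes a very large value that is out of
 * bounds; the tail call fails and the program returns 2.
 */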
{
	"runtime/jit: pass negative index to tail_call",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, -1),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 2,
},
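/* Only the lower 32 bits of the index are used, so 0x100000000 behaves
 * like index 0 and the privileged run reaches the program returning 42.
 */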
{
	"runtime/jit: pass > 32bit index to tail_call",
	.insns = {
	BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 2 },
	.result = ACCEPT,
	.retval = 42,
	/* Verifier rewrite for unpriv skips tail call here. */
	.retval_unpriv = 2,
},