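-- Built-ins for the Lua-to-eBPF compiler: each entry in the `builtins` table
-- maps a Lua function to a code generator that emits eBPF instructions through
-- the emitter `e` (e.emit), its variable table (e.V), and the register
-- allocator (e.vreg/e.reg_alloc). Gaps in this excerpt are marked `-- ...`.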
builtins[ntoh] = function (e, dst, a, w)
    -- Width defaults to the bit width of the operand's type
    w = w or ffi.sizeof(e.V[a].type)*8
    -- ...
    e.vcopy(dst, a)
    -- BPF_END byte-swap; the immediate holds the width (16/32/64)
    e.emit(BPF.ALU + BPF.END + BPF.TO_BE, e.vreg(dst), 0, 0, w)
end
builtins[hton] = function (e, dst, a, w)
    w = w or ffi.sizeof(e.V[a].type)*8
    -- ...
    e.vcopy(dst, a)
    e.emit(BPF.ALU + BPF.END + BPF.TO_LE, e.vreg(dst), 0, 0, w)
end
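-- ntoh()/hton() above lower to the BPF_END byte-swap instruction; on a
-- little-endian host TO_BE swaps bytes, while TO_LE effectively truncates to
-- the given width. xadd() below emits BPF_XADD, an atomic
-- '*(u32/u64 *)(dst_reg + off) += src_reg'.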
builtins[xadd] = function (e, ret, a, b, off)
    local vinfo = e.V[a].const
    local w = ffi.sizeof(vinfo.__dissector) -- Width of the pointed-to type
    -- Resolve a string offset (struct field name) through the dissector
    if e.V[off] and type(e.V[off].const) == 'string' then
        local ct, field = vinfo.__dissector, e.V[off].const
        -- ...
    end
    -- Materialize operands, then reserve R0 for the result
    local src_reg = e.vreg(b)
    local dst_reg = e.vreg(a)
    -- ...
    e.vset(ret)
    e.vreg(ret, 0, true, ffi.typeof('int32_t'))
    -- Emit a NULL-pointer guard unless the pointer is provably valid
    if not e.V[a].source or e.V[a].source:find('_or_null', 1, true) then
        e.emit(BPF.JMP + BPF.JEQ + BPF.K, dst_reg, 0, 1, 0) -- if (dst != NULL)
    end
    e.emit(BPF.XADD + BPF.STX + const_width[w], dst_reg, src_reg, off or 0, 0)
end
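-- probe_read() wraps the bpf_probe_read() helper. eBPF helper calling
-- convention: arguments go in R1-R5 and the return value comes back in R0;
-- R10 is the read-only frame pointer, so stack slots sit at negative offsets
-- from it.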
builtins[probe_read] = function (e, ret, dst, src, vtype, ofs)
    e.reg_alloc(e.tmpvar, 1)
    -- R1 = destination; only stack memory may be written, so point R1 at the
    -- variable's stack slot, allocating one first if needed
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
    if not e.V[dst].const or not (e.V[dst].const.__base and e.V[dst].const.__base > 0) then
        builtins[ffi.new](e, dst, vtype) -- Allocate stack memory
    end
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
    -- R2 = read size, derived from the destination type
    e.reg_alloc(e.tmpvar, 2)
    if not vtype then
        vtype = cdef.typename(e.V[dst].type)
        -- ...
    end
    local w = ffi.sizeof(vtype)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, w)
    -- R3 = source pointer, plus optional offset
    if e.V[src].reg then
        e.reg_alloc(e.tmpvar, 3) -- Copy from original register
        e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 3, e.V[src].reg, 0, 0)
    else
        e.vreg(src, 3)
        e.reg_spill(src) -- Spill to avoid overwriting
    end
    if ofs then
        e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 3, 0, 0, ofs)
    end
    -- Call helper; the result is bound to ret (or a temporary)
    ret = ret or e.tmpvar
    e.vset(ret)
    e.vreg(ret, 0, true, ffi.typeof('int32_t'))
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.probe_read)
    e.V[e.tmpvar].reg = nil -- Free temporary registers
end
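-- ffi.cast() is purely a compile-time operation: it retypes the variable and
-- its dissector without emitting any instructions.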
builtins[ffi.cast] = function (e, dst, ct, x)
    assert(e.V[ct].const, 'ffi.cast(ctype, x) called with bad ctype')
    e.vcopy(dst, x)
    if e.V[x].const and type(e.V[x].const) == 'table' then
        e.V[dst].const.__dissector = ffi.typeof(e.V[ct].const)
    end
    e.V[dst].type = ffi.typeof(e.V[ct].const)
    -- ...
    -- Values cast to struct pt_regs must be fetched via probe_read()
    if ffi.typeof(e.V[ct].const) == ffi.typeof('struct pt_regs') then
        e.V[dst].source = 'ptr_to_probe'
    end
end
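-- ffi.new() reserves a stack slot through e.valloc() and records the frame
-- offset in __base; the allocation itself emits no instructions.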
builtins[ffi.new] = function (e, dst, ct, x)
    ct = ffi.typeof(e.V[ct].const) -- Get ctype from variable
    -- ...
    e.vset(dst, nil, ct)
    e.V[dst].source = 'ptr_to_stack'
    e.V[dst].const = {__base = e.valloc(ffi.sizeof(ct), true), __dissector = ct}
    -- Arrays dissect by element type (elem_type is parsed from ct in elided code)
    if elem_type then
        e.V[dst].const.__dissector = ffi.typeof(elem_type)
    end
end
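-- ffi.copy() dispatches on where the source data lives: probe (kernel) memory
-- is fetched with bpf_probe_read() into stack memory; map values and dissected
-- packet buffers take the other branches below.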
builtins[ffi.copy] = function (e, ret, dst, src)
    assert(cdef.isptr(e.V[dst].type), 'ffi.copy(dst, src) - dst MUST be a pointer type')
    assert(cdef.isptr(e.V[src].type), 'ffi.copy(dst, src) - src MUST be a pointer type')
    -- ...
    if e.V[src].source and e.V[src].source:find('ptr_to_probe', 1, true) then
        e.reg_alloc(e.tmpvar, 1)
        -- R1 = destination on stack (spill slot or allocated base)
        e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
        if e.V[dst].spill then
            e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].spill)
        elseif e.V[dst].const.__base then
            e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
        end
        -- R2 = copy size, bounded by the destination type
        local dst_tname = cdef.typename(e.V[dst].type)
        -- ...
        e.reg_alloc(e.tmpvar, 2)
        e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, ffi.sizeof(dst_tname))
        -- R3 = source pointer
        if e.V[src].reg then
            e.reg_alloc(e.tmpvar, 3) -- Copy from original register
            e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 3, e.V[src].reg, 0, 0)
        else
            e.vreg(src, 3)
            e.reg_spill(src) -- Spill to avoid overwriting
        end
        -- Call helper
        e.vset(ret)
        e.vreg(ret, 0, true, ffi.typeof('int32_t'))
        e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.probe_read)
        e.V[e.tmpvar].reg = nil -- Free temporary registers
    elseif e.V[src].const and e.V[src].const.__map then
        -- ...
    elseif e.V[src].const and e.V[src].const.__dissector then
        -- ...
    end
end
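-- print() lowers to bpf_trace_printk(fmt, fmt_size, ...): the format string
-- must reside on the BPF stack (pointed to by R1, sized by R2), and at most
-- three extra arguments are passed in R3-R5.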
builtins[print] = function (e, ret, fmt, a1, a2, a3)
    -- Reserve temporaries for the helper call arguments
    e.reg_alloc(e.V[e.tmpvar], 1)
    e.reg_alloc(e.V[e.tmpvar+1], 1)
    -- Materialize a literal format string on the stack
    if type(e.V[fmt].const) == 'string' then
        local src = e.V[fmt].const
        local len = #src + 1 -- Include terminating NUL
        local dst = e.valloc(len, src)
        e.V[fmt].const = {__base=dst}
        e.V[fmt].type = ffi.typeof('char ['..len..']')
    elseif e.V[fmt].const.__base then -- luacheck: ignore
        -- Already on stack
    end
    -- R1 = format string pointer, R2 = format string length
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[fmt].const.__base)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, ffi.sizeof(e.V[fmt].type))
    -- Materialize up to three arguments in R3-R5
    for i, arg in ipairs({a1, a2, a3}) do
        e.vcopy(e.tmpvar, arg)  -- Copy variable
        e.vreg(e.tmpvar, 3+i-1) -- Materialize it in arg register
    end
    e.vset(ret)
    e.vreg(ret, 0, true, ffi.typeof('int32_t')) -- Return is integer
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.trace_printk)
    e.V[e.tmpvar].reg = nil -- Free temporary registers
end
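-- perf_submit() lowers to bpf_perf_event_output(ctx, map, flags, data, size),
-- pushing a stack-resident record into a perf event array map.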
local function perf_submit(e, dst, map_var, src)
    -- R2 = map fd
    local map = e.V[map_var].const
    e.vcopy(e.tmpvar, map_var)
    e.vreg(e.tmpvar, 2, true, ffi.typeof('uint64_t'))
    e.LD_IMM_X(2, BPF.PSEUDO_MAP_FD, map.fd, ffi.sizeof('uint64_t'))
    -- R1 = context
    e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
    -- R3 = flags
    e.vset(e.tmpvar, nil, 0) -- BPF_F_CURRENT_CPU
    e.vreg(e.tmpvar, 3, false, ffi.typeof('uint64_t'))
    -- R4 = pointer to the record on stack, R5 = its size
    assert(e.V[src].const.__base, 'NYI: submit(map, var) - variable is not on stack')
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 4, 10, 0, 0)
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 4, 0, 0, -e.V[src].const.__base)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 5, 0, 0, ffi.sizeof(e.V[src].type))
    -- Call helper
    e.vset(dst)
    e.vreg(dst, 0, true, ffi.typeof('int32_t')) -- Return is integer
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.perf_event_output)
    e.V[e.tmpvar].reg = nil -- Free temporary registers
end
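-- load_bytes() lowers to bpf_skb_load_bytes(skb, offset, to, len), copying
-- packet bytes into a stack buffer.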
local function load_bytes(e, dst, off, var)
    -- R2 = offset within the packet
    e.vset(e.tmpvar, nil, off)
    e.vreg(e.tmpvar, 2, false, ffi.typeof('uint64_t'))
    -- R1 = context (skb)
    e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
    -- R3 = destination buffer on stack, R4 = its size
    assert(e.V[var].const.__base, 'NYI: load_bytes(off, var, len) - variable is not on stack')
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 3, 10, 0, 0)
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 3, 0, 0, -e.V[var].const.__base)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 4, 0, 0, ffi.sizeof(e.V[var].type))
    -- Call helper
    e.vset(dst)
    e.vreg(dst, 0, true, ffi.typeof('int32_t')) -- Return is integer
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.skb_load_bytes)
    e.V[e.tmpvar].reg = nil -- Free temporary registers
end
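-- stack_id() lowers to bpf_get_stackid(ctx, map, flags) and returns an id
-- into a BPF_MAP_TYPE_STACK_TRACE map.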
local function stack_id(e, ret, map_var, key)
    -- R2 = map fd
    local map = e.V[map_var].const
    e.vcopy(e.tmpvar, map_var)
    e.vreg(e.tmpvar, 2, true, ffi.typeof('uint64_t'))
    e.LD_IMM_X(2, BPF.PSEUDO_MAP_FD, map.fd, ffi.sizeof('uint64_t'))
    -- R1 = context
    e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
    -- R3 = flags (constant key)
    local imm = e.V[key].const
    -- ...
    e.reg_alloc(e.tmpvar, 3) -- Spill anything in R3 (unnamed tmp variable)
    e.LD_IMM_X(3, 0, imm, 8)
    -- Call helper
    e.vset(ret)
    e.vreg(ret, 0, true, ffi.typeof('int32_t'))
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.get_stackid)
    e.V[e.tmpvar].reg = nil -- Free temporary registers
end
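-- table.insert(map, value) is sugar for perf_submit() on perf event maps.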
builtins[table.insert] = function (e, dst, map_var, value)
    assert(e.V[map_var].const.__map, 'NYI: table.insert() supported only on BPF maps')
    return perf_submit(e, dst, map_var, value)
end
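-- comm() lowers to bpf_get_current_comm(buf, size), writing the current task
-- name into a stack buffer.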
builtins[comm] = function (e, ret, dst)
    -- R1 = destination buffer on stack
    assert(e.V[dst].const.__base, 'NYI: comm(buffer) - buffer variable is not on stack')
    e.reg_alloc(e.tmpvar, 1) -- Spill
    e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
    -- R2 = buffer size
    e.reg_alloc(e.tmpvar, 2) -- Spill
    e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, ffi.sizeof(e.V[dst].type))
    -- Call helper
    e.vset(ret)
    e.vreg(ret, 0, true, ffi.typeof('int32_t'))
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.get_current_comm)
    e.V[e.tmpvar].reg = nil -- Free temporary registers
end
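-- math.log2() computes an integer log2 (position of the highest set bit) with
-- an unrolled binary search, since eBPF has no count-leading-zeros instruction.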
builtins[math.log2] = function (e, dst, x)
    -- r accumulates the bit position, starting at 0
    local r = e.vreg(dst, nil, true)
    e.emit(BPF.ALU64 + BPF.MOV + BPF.K, r, 0, 0, 0)
    -- Work on a temporary copy of the operand
    e.vcopy(e.tmpvar, x)
    local v = e.vreg(e.tmpvar, 2)
    if cdef.isptr(e.V[x].const) then -- No pointer arithmetics, dereference
        e.vderef(v, v, {const = {__dissector=ffi.typeof('uint64_t')}})
    end
    e.emit(BPF.ALU64 + BPF.NEG + BPF.K, v, 0, 0, 0) -- v = -v, inverting the tests below
    -- Unrolled binary search over half-widths k = 16, 8, 4, 2, 1
    for i = 4, 0, -1 do
        local k = bit.lshift(1, i)
        e.emit(BPF.JMP + BPF.JGT + BPF.K, v, 0, 2, bit.bnot(bit.lshift(1, k))) -- if !upper_half(x)
        e.emit(BPF.ALU64 + BPF.ARSH + BPF.K, v, 0, 0, k)                       -- v >>= k
        e.emit(BPF.ALU64 + BPF.OR + BPF.K, r, 0, 0, k)                         -- r |= k
    end
    -- Destination now holds a runtime value, not a constant
    e.V[dst].const = nil
    e.V[e.tmpvar].reg = nil
end
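-- log10 and natural log reuse log2 and rescale in fixed point:
-- log10(x) = log2(x)*log10(2) and ln(x) = log2(x)*ln(2), with the constant
-- encoded as a 12-bit fraction (multiply, then shift right by 12).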
builtins[math.log10] = function (e, dst, x)
    -- log10(x) = log2(x) * log10(2)
    builtins[math.log2](e, dst, x)
    local r = e.V[dst].reg
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, r, 0, 0, 1)    -- Compensate round-down
    e.emit(BPF.ALU64 + BPF.MUL + BPF.K, r, 0, 0, 1233) -- log10(2) ~ 1233>>12
    e.emit(BPF.ALU64 + BPF.RSH + BPF.K, r, 0, 0, 12)
end
builtins[math.log] = function (e, dst, x)
    -- ln(x) = log2(x) * ln(2)
    builtins[math.log2](e, dst, x)
    local r = e.V[dst].reg
    e.emit(BPF.ALU64 + BPF.ADD + BPF.K, r, 0, 0, 1)    -- Compensate round-down
    e.emit(BPF.ALU64 + BPF.MUL + BPF.K, r, 0, 0, 2839) -- ln(2) ~ 2839>>12
    e.emit(BPF.ALU64 + BPF.RSH + BPF.K, r, 0, 0, 12)
end
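-- call_helper() emits a bare helper call: the result lands in R0 and is bound
-- to the destination variable as an integer of the requested type.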
local function call_helper(e, dst, h, vtype)
    e.vset(dst)
    e.vreg(dst, 0, true, vtype or ffi.typeof('uint64_t'))
    e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, h)
    e.V[dst].const = nil -- Target is not a function anymore
end
-- ...
builtins[cpu] = function (e, dst) return call_helper(e, dst, HELPER.get_smp_processor_id) end
builtins[rand] = function (e, dst) return call_helper(e, dst, HELPER.get_prandom_u32, ffi.typeof('uint32_t')) end
builtins[time] = function (e, dst) return call_helper(e, dst, HELPER.ktime_get_ns) end
builtins[pid_tgid] = function (e, dst) return call_helper(e, dst, HELPER.get_current_pid_tgid) end
builtins[uid_gid] = function (e, dst) return call_helper(e, dst, HELPER.get_current_uid_gid) end
builtins[perf_submit] = function (e, dst, map, value) return perf_submit(e, dst, map, value) end
builtins[stack_id] = function (e, dst, map, key) return stack_id(e, dst, map, key) end
builtins[load_bytes] = function (e, dst, off, var, len) return load_bytes(e, dst, off, var, len) end
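-- Usage sketch (illustrative; exact names depend on what this module exports
-- to BPF programs):
--   xadd(counter, 1)              -- atomic increment (BPF_XADD)
--   print('pid %d', pid_tgid())   -- bpf_trace_printk + get_current_pid_tgid
--   perf_submit(events, record)   -- bpf_perf_event_output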