--[[
http://www.apache.org/licenses/LICENSE-2.0
local BPF, HELPER = ffi.typeof('struct bpf'), ffi.typeof('struct bpf_func_id')
-- Built-ins that will be translated into BPF instructions
-- e.g. bit.bor(0xf0, 0x0f) becomes {'alu64, or, k', reg(0xf0), reg(0x0f), 0, 0}
-- Extensions and intrinsics
local function width_type(w)
	-- Note: ffi.typeof doesn't accept '?' as template
-- Return struct member size/type (requires LuaJIT 2.1+)
-- I am ashamed that there's no easier way around it.
local function sizeofattr(ct, name)
	if cinfo.name == name then break end
	local size = math.max(1, ffi.typeinfo(cinfo.sib or ct).size - cinfo.size)
	-- Guess type name
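-- Illustrative usage sketch (not part of the original file; the struct name
-- is hypothetical, and the member offset math assumes no padding):
--   ffi.cdef 'struct pair { uint32_t a; uint32_t b; };'
--   local size = sizeofattr(ffi.typeof('struct pair'), 'b') -- size == 4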
-- Byte-order conversions for little endian
local function ntoh(x, w)
local function hton(x, w) return ntoh(x, w) end
builtins[ntoh] = function (e, dst, a, w)
	-- This is trickery, but TO_LE means cpu_to_le(),
	-- and we want exactly the opposite, as network byte order is always 'be'
	if w == 8 then return end -- NOOP
	assert(w <= 64, 'NYI: ntoh(a[, width]) - operand larger than register width')
	-- Allocate registers and execute
builtins[hton] = function (e, dst, a, w)
	if w == 8 then return end -- NOOP
	assert(w <= 64, 'NYI: hton(a[, width]) - operand larger than register width')
	-- Allocate registers and execute
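-- Illustrative sketch (not part of the original file): on a little-endian
-- host the ntoh()/hton() pair amounts to a byte swap, which LuaJIT's bit
-- library can demonstrate for the 16-bit case:
--   local bit = require('bit')
--   local function ntoh16(x) return bit.rshift(bit.bswap(x), 16) end
--   assert(ntoh16(0x3412) == 0x1234)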
-- Byte-order conversions for big endian are no-ops
if ffi.abi('be') then
	ntoh = function (x, w)
		return w and ffi.cast(builtins.width_type(w/8), x) or x
	end
	hton = ntoh
	builtins[ntoh] = function(_, _, _) return end
	builtins[hton] = function(_, _, _) return end
end
-- Other built-ins
local function xadd() error('NYI') end
builtins[xadd] = function (e, ret, a, b, off)
	assert(vinfo and vinfo.__dissector, 'xadd(a, b[, offset]) called on non-pointer')
	-- Calculate structure attribute offsets
	assert(off, 'xadd(a, b, offset) - offset is not valid in given structure')
	assert(w == 4 or w == 8, 'NYI: xadd() - 1 and 2 byte atomic increments are not supported')
	-- Allocate registers and execute
	-- Set variable for return value and call
	-- Optimize the NULL check away if provably not NULL
	e.emit(BPF.JMP + BPF.JEQ + BPF.K, dst_reg, 0, 1, 0) -- if (dst != NULL)
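-- Usage sketch (illustrative; the variables are hypothetical):
--   xadd(val, 1)           -- atomic *val += 1; val must be a 4B/8B pointer
--   xadd(stats, 1, 'rxb')  -- atomically increment member 'rxb' of *stats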
local function probe_read() error('NYI') end
builtins[probe_read] = function (e, ret, dst, src, vtype, ofs)
	-- Load stack pointer into dst; since only loads into stack memory are
	-- supported, we have to use allocated stack memory or create a new
	-- allocation and convert it to a pointer type
	builtins[ffi.new](e, dst, vtype) -- Allocate stack memory
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
	-- Set stack memory maximum size bound
	-- Dereference pointer type to pointed type for size calculation
	if vtype:sub(-1) == '*' then vtype = vtype:sub(0, -2) end
	-- Set source pointer
	e.reg_alloc(e.tmpvar, 3) -- Copy from original register
	e.reg_spill(src) -- Spill to avoid overwriting
	-- Call probe read helper
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.probe_read)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
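-- Usage sketch (illustrative; `src_ptr` is a hypothetical kernel pointer):
--   local buf = ffi.new('char [64]')
--   probe_read(buf, src_ptr) -- copy kernel memory onto the BPF stack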
builtins[ffi.cast] = function (e, dst, ct, x)
	-- Specific types also encode the source of the data.
	-- This is because BPF has different helpers for reading
	-- different data sources, so variables must track origins.
	-- struct pt_regs - source of the data is probe
	-- struct skb     - source of the data is socket buffer
	-- struct X       - source of the data is probe/tracepoint
builtins[ffi.new] = function (e, dst, ct, x)
	ct = ffi.typeof(e.V[ct].const) -- Get ctype from variable
	assert(not x, 'NYI: ffi.new(ctype, ...) - initializer is not supported')
	assert(not cdef.isptr(ct, true), 'NYI: ffi.new(ctype, ...) - ctype MUST NOT be a pointer')
	-- Set array dissector if an array was created,
	-- e.g. if ct is 'char [2]', then the dissector is 'char'
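-- Usage sketch (illustrative): inside a compiled function, ffi.new()
-- allocates BPF stack memory rather than host memory:
--   local buf = ffi.new('char [16]') -- 16 bytes on stack, dissector 'char'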
builtins[ffi.copy] = function (e, ret, dst, src)
	assert(cdef.isptr(e.V[dst].type), 'ffi.copy(dst, src) - dst MUST be a pointer type')
	assert(cdef.isptr(e.V[src].type), 'ffi.copy(dst, src) - src MUST be a pointer type')
	-- Specific types also encode the source of the data
	-- struct pt_regs - source of the data is probe
	-- struct skb     - source of the data is socket buffer
	-- Load stack pointer into dst; since only loads into stack memory are
	-- supported, we have to either use a spilled variable or an allocated
	-- stack memory offset
		e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].spill)
		e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
	else error('ffi.copy(dst, src) - can\'t get stack offset of dst') end
	-- Set stack memory maximum size bound
	if dst_tname:sub(-1) == '*' then dst_tname = dst_tname:sub(0, -2) end
	-- Set source pointer
	e.reg_alloc(e.tmpvar, 3) -- Copy from original register
	e.reg_spill(src) -- Spill to avoid overwriting
	-- Call probe read helper
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.probe_read)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
	error('NYI: ffi.copy(dst, src) - src is backed by BPF map')
	error('NYI: ffi.copy(dst, src) - src is backed by socket buffer')
	-- TODO: identify cheap register move
	-- TODO: identify copy to/from stack
	error('NYI: ffi.copy(dst, src) - src is neither a BPF map, socket buffer, nor probe')
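-- Usage sketch (illustrative; `task_comm_ptr` is hypothetical): copying from
-- probe (kernel) memory is lowered to the probe_read helper path above:
--   local name = ffi.new('char [16]')
--   ffi.copy(name, task_comm_ptr) -- both sides must be pointer types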
-- The print(format, ...) builtin changes semantics from Lua's print(...):
-- the first parameter has to be a format string, and only a reduced set of
-- conversion specifiers is allowed: %d %u %x %ld %lu %lx %lld %llu %llx %p %s
builtins[print] = function (e, ret, fmt, a1, a2, a3)
	-- Load format string and length
	-- TODO: this is the materialization step
	elseif e.V[fmt].const.__base then -- luacheck: ignore
		-- NOP
	else error('NYI: print(fmt, ...) - format variable is not literal/stack memory') end
	-- Prepare helper call
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[fmt].const.__base)
	assert(#args <= 3, 'print(fmt, ...) - maximum of 3 arguments supported')
	e.vcopy(e.tmpvar, arg) -- Copy variable
	e.vreg(e.tmpvar, 3+i-1) -- Materialize it in arg register
	-- Call helper
	e.vreg(ret, 0, true, ffi.typeof('int32_t')) -- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.trace_printk)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
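-- Usage sketch (illustrative): the format must be a string literal (or stack
-- memory) and at most three arguments may follow; this lowers to
-- bpf_trace_printk():
--   print('pid %d count %u\n', pid, cnt)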
-- Implements bpf_perf_event_output(ctx, map, flags, var, vlen) on a perf event map
local function perf_submit(e, dst, map_var, src)
	-- Set R2 = map fd (indirect load)
	-- Set R1 = ctx
	e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
	-- Set R3 = flags
	e.vset(e.tmpvar, nil, 0) -- BPF_F_CURRENT_CPU
	-- Set R4 = pointer to src on stack
	assert(e.V[src].const.__base, 'NYI: submit(map, var) - variable is not on stack')
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 4, 0, 0, -e.V[src].const.__base)
	-- Set R5 = src length
	-- Set R0 = ret and call
	e.vreg(dst, 0, true, ffi.typeof('int32_t')) -- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.perf_event_output)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
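-- Illustrative summary (not in the original): the register setup above follows
-- the BPF helper calling convention (R1..R5 = arguments, R0 = return value):
--   R1 = ctx, R2 = map fd, R3 = BPF_F_CURRENT_CPU, R4 = &src, R5 = sizeof(src)
--   R0 = bpf_perf_event_output(R1, R2, R3, R4, R5)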
-- Implements bpf_skb_load_bytes(ctx, off, var, vlen) on skb->data
local function load_bytes(e, dst, off, var)
	-- Set R2 = offset
	-- Set R1 = ctx
	e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
	-- Set R3 = pointer to var on stack
	assert(e.V[var].const.__base, 'NYI: load_bytes(off, var, len) - variable is not on stack')
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 3, 0, 0, -e.V[var].const.__base)
	-- Set R4 = var length
	-- Set R0 = ret and call
	e.vreg(dst, 0, true, ffi.typeof('int32_t')) -- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.skb_load_bytes)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
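-- Usage sketch (illustrative): read the first 14 bytes of skb->data (e.g. an
-- Ethernet header) into stack memory; the length is taken from the buffer:
--   local hdr = ffi.new('char [14]')
--   load_bytes(0, hdr) -- lowers to bpf_skb_load_bytes(ctx, 0, hdr, 14)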
-- Implements bpf_get_stackid()
local function stack_id(e, ret, map_var, key)
	-- Set R2 = map fd (indirect load)
	-- Set R1 = ctx
	e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
	-- Load flags in R3 (immediate value or key)
	e.reg_alloc(e.tmpvar, 3) -- Spill anything in R3 (unnamed tmp variable)
	-- Return R0 as signed integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.get_stackid)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
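-- Usage sketch (illustrative; `stack_map` is a hypothetical stack-trace map):
-- the key argument is used as the constant flags value:
--   local id = stack_id(stack_map, 0) -- id = bpf_get_stackid(ctx, map, 0)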
-- table.insert(table, value) keeps Lua semantics, with the exception of BPF maps:
-- for a `perf_event` map, insertion submits the inserted value
builtins[table.insert] = function (e, dst, map_var, value)
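-- Usage sketch (illustrative; `events` is a hypothetical perf_event map):
--   table.insert(events, event) -- submits `event` via bpf_perf_event_output()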
-- bpf_get_current_comm(buffer) - write current process name to byte buffer
local function comm() error('NYI') end
builtins[comm] = function (e, ret, dst)
	-- Set R1 = buffer
	assert(e.V[dst].const.__base, 'NYI: comm(buffer) - buffer variable is not on stack')
	e.reg_alloc(e.tmpvar, 1) -- Spill
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
	-- Set R2 = length
	e.reg_alloc(e.tmpvar, 2) -- Spill
	-- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.get_current_comm)
	e.V[e.tmpvar].reg = nil -- Free temporary registers
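-- Usage sketch (illustrative): the buffer must live on the BPF stack:
--   local name = ffi.new('char [16]') -- TASK_COMM_LEN bytes
--   comm(name) -- fills `name` via bpf_get_current_comm()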
-- Math library built-ins
math.log2 = function () error('NYI') end
builtins[math.log2] = function (e, dst, x)
	-- Classic integer bit-subdivision algorithm to find the position
	-- of the highest bit set, adapted for BPF bytecode-friendly operations.
	-- https://graphics.stanford.edu/~seander/bithacks.html
	-- r = 0
	-- v = x
	if cdef.isptr(e.V[x].const) then -- No pointer arithmetic, dereference
	-- Invert value to invert all tests, otherwise we would need and+jnz
	e.emit(BPF.ALU64 + BPF.NEG + BPF.K, v, 0, 0, 0) -- v = ~v
	-- Unrolled test cases; masking is converted to arithmetic, as we don't have "if !(a & b)".
	-- As we're testing the inverted value, we have to use an arithmetic shift to copy the MSB.
	for i=4,0,-1 do
		e.emit(BPF.JMP + BPF.JGT + BPF.K, v, 0, 2, bit.bnot(bit.lshift(1, k))) -- if !upper_half(x)
		e.emit(BPF.ALU64 + BPF.ARSH + BPF.K, v, 0, 0, k) -- v >>= k
		e.emit(BPF.ALU64 + BPF.OR + BPF.K, r, 0, 0, k) -- r |= k
	-- No longer constant, clean up tmpvars
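-- Reference version of the same algorithm in plain Lua (illustrative only;
-- the builtin above emits equivalent BPF bytecode instead of running this):
--   local function ilog2(v)
--     local r = 0
--     for i = 4, 0, -1 do
--       local k = bit.lshift(1, i) -- 16, 8, 4, 2, 1
--       if bit.rshift(v, k) ~= 0 then -- upper half non-empty?
--         v = bit.rshift(v, k) -- v >>= k
--         r = bit.bor(r, k)    -- r |= k
--       end
--     end
--     return r
--   end
--   assert(ilog2(4096) == 12)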
builtins[math.log10] = function (e, dst, x)
	-- Compute log2(x) and transform
	-- Relationship: log10(v) = log2(v) / log2(10)
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, r, 0, 0, 1) -- Compensate round-down
	e.emit(BPF.ALU64 + BPF.MUL + BPF.K, r, 0, 0, 1233) -- 1/log2(10) ~ 1233>>12
builtins[math.log] = function (e, dst, x)
	-- Compute log2(x) and transform
	-- Relationship: ln(v) = log2(v) / log2(e)
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, r, 0, 0, 1) -- Compensate round-down
	e.emit(BPF.ALU64 + BPF.MUL + BPF.K, r, 0, 0, 2839) -- 1/log2(e) ~ 2839>>12
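-- Worked check of the fixed-point constants above (illustrative):
--   1/log2(10) = 0.30103 ~ 1233/4096 and 1/log2(e) = ln(2) = 0.69315 ~ 2839/4096
-- e.g. for v = 1000: log2(v) = 9, so log10(v) ~ (9+1)*1233 >> 12 = 3,
-- and ln(v) ~ (9+1)*2839 >> 12 = 6 (true value 6.91, rounded down)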
-- Call-type helpers
local function call_helper(e, dst, h, vtype)
	e.V[dst].const = nil -- Target is not a function anymore
local function cpu() error('NYI') end
local function rand() error('NYI') end
local function time() error('NYI') end
local function pid_tgid() error('NYI') end
local function uid_gid() error('NYI') end
-- Export helpers and builtin variants
builtins[cpu] = function (e, dst) return call_helper(e, dst, HELPER.get_smp_processor_id) end
builtins[rand] = function (e, dst) return call_helper(e, dst, HELPER.get_prandom_u32, ffi.typeof('uint32_t')) end
builtins[time] = function (e, dst) return call_helper(e, dst, HELPER.ktime_get_ns) end
builtins[pid_tgid] = function (e, dst) return call_helper(e, dst, HELPER.get_current_pid_tgid) end
builtins[uid_gid] = function (e, dst) return call_helper(e, dst, HELPER.get_current_uid_gid) end
builtins[perf_submit] = function (e, dst, map, value) return perf_submit(e, dst, map, value) end
builtins[stack_id] = function (e, dst, map, key) return stack_id(e, dst, map, key) end
builtins[load_bytes] = function (e, dst, off, var, len) return load_bytes(e, dst, off, var, len) end
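-- Usage sketch (illustrative): the zero-argument helpers materialize a single
-- BPF helper call and return R0, e.g.:
--   local id = pid_tgid() -- bpf_get_current_pid_tgid(): tgid << 32 | pid
--   local c  = cpu()      -- bpf_get_smp_processor_id()

-- Export the builtins table
return builtins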