/* XDP monitor tool, based on tracepoints
 *
 *  Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
	/* TODO: have entries for all possible errno's */
};
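/* Slot XDP_REDIRECT_SUCCESS holds the number of successful redirects and
 * slot XDP_REDIRECT_ERROR the number of failed ones (see the enum below).
 * Being a PERCPU_ARRAY, every CPU keeps its own pair of counters; the
 * user-space side of the tool is expected to look up each key and sum the
 * per-CPU values to get a total.
 */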

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:                kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	unsigned short common_type;	//	offset:0;  size:2; signed:0;
	unsigned char common_flags;	//	offset:2;  size:1; signed:0;
	unsigned char common_preempt_count;//	offset:3;  size:1; signed:0;
	int common_pid;			//	offset:4;  size:4; signed:1;

	int prog_id;			//	offset:8;  size:4; signed:1;
	u32 act;			//	offset:12; size:4; signed:0;
	int ifindex;			//	offset:16; size:4; signed:1;
	int err;			//	offset:20; size:4; signed:1;
	int to_ifindex;			//	offset:24; size:4; signed:1;
	u32 map_id;			//	offset:28; size:4; signed:0;
	int map_index;			//	offset:32; size:4; signed:1;
};					//	offset:36

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 0;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well in
	 * practice, as stopping perf-record also unloads this bpf_prog.
	 * Plus, there is additional overhead of doing so.
	 */
}

SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}


SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
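
/* Illustrative sketch (not part of this BPF object): one way a user-space
 * loader could read and total the per-CPU counters in redirect_err_cnt,
 * assuming libbpf's bpf_map_lookup_elem() and bpf_num_possible_cpus() are
 * available and "map_fd" was obtained from whatever loader set up the map.
 * The helper name below is hypothetical.
 *
 *	#include <bpf/bpf.h>
 *	#include <bpf/libbpf.h>
 *
 *	static __u64 sum_redirect_cnt(int map_fd, __u32 key)
 *	{
 *		int i, nr_cpus = bpf_num_possible_cpus();
 *		__u64 sum = 0, values[nr_cpus];
 *
 *		// Per-CPU maps return one value slot per possible CPU
 *		if (bpf_map_lookup_elem(map_fd, &key, values))
 *			return 0;
 *		for (i = 0; i < nr_cpus; i++)
 *			sum += values[i];
 *		return sum;
 *	}
 */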