/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/bpf.h>
#include "bpf.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

/* Cast a userspace pointer to the __u64 form used in union bpf_attr. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* Thin wrapper around the bpf(2) syscall; every helper below goes through it. */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int bpf_create_map_node(enum bpf_map_type map_type, int key_size,
			int value_size, int max_entries, __u32 map_flags,
			int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	return bpf_create_map_node(map_type, key_size, value_size,
				   max_entries, map_flags, -1);
}
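
/*
 * Illustrative sketch (not part of the original file): creating a small
 * hash map with the wrapper above.  The key/value sizes and max_entries
 * are arbitrary example values.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
 *				    sizeof(__u64), 256, 0);
 *	if (map_fd < 0)
 *		... creation failed, errno is set by the bpf(2) syscall ...
 */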

int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size,
			       int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	/* The value of an outer map entry is an inner map, passed as a 4-byte fd on update. */
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size,
			  int inner_map_fd, int max_entries, __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, key_size, inner_map_fd,
					  max_entries, map_flags, -1);
}
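
/*
 * Illustrative sketch (not part of the original file): creating a
 * map-in-map.  A throwaway inner map acts as a template that inner maps
 * inserted later must match; all parameters are example values.
 *
 *	int inner_fd, outer_fd;
 *
 *	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				  sizeof(__u64), 1, 0);
 *	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *					 sizeof(__u32), inner_fd, 8, 0);
 */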

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf, size_t log_buf_sz)
{
	int fd;
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = kern_version;

	/* Load without a verifier log first; retry with the log only on failure. */
	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
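
/*
 * Illustrative sketch (not part of the original file): loading a minimal
 * "return 0" program with bpf_load_program().  The instruction-building
 * macros BPF_MOV64_IMM() and BPF_EXIT_INSN() are assumed to come from
 * tools/include/linux/filter.h; they are not provided by this file.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char log[4096];
 *	int prog_fd;
 *
 *	prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
 *				   sizeof(insns) / sizeof(insns[0]), "GPL",
 *				   0, log, sizeof(log));
 *	if (prog_fd < 0)
 *		... the verifier log, if any, is in log[] ...
 */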

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
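
/*
 * Illustrative sketch (not part of the original file): storing, reading
 * back and iterating elements of a map via the wrappers above.  map_fd
 * and the __u32/__u64 key/value layout are carried over from the earlier
 * map-creation example; the start key (__u32)-1 is assumed not to be
 * present in the map.
 *
 *	__u32 key = 1, it, next;
 *	__u64 value = 42;
 *
 *	bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &key, &value);
 *
 *	it = -1;
 *	while (bpf_map_get_next_key(map_fd, &it, &next) == 0) {
 *		bpf_map_lookup_elem(map_fd, &next, &value);
 *		it = next;
 *	}
 */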

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
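
/*
 * Illustrative sketch (not part of the original file): attaching and
 * detaching a cgroup program.  prog_fd is assumed to be a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program and cgroup_fd an open cgroup v2
 * directory; both names are placeholders.
 *
 *	if (!bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS, 0)) {
 *		... the program now sees the cgroup's ingress traffic ...
 *		bpf_prog_detach(cgroup_fd, BPF_CGROUP_INET_INGRESS);
 *	}
 */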

int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	bzero(&attr, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}
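
/*
 * Illustrative sketch (not part of the original file): test-running a
 * program once against a synthetic packet.  prog_fd is assumed to refer
 * to a program type that supports BPF_PROG_TEST_RUN (e.g. XDP or socket
 * filter); the buffer sizes are example values.
 *
 *	char in[64] = {}, out[256];
 *	__u32 out_size = sizeof(out), retval, duration;
 *	int err;
 *
 *	err = bpf_prog_test_run(prog_fd, 1, in, sizeof(in), out, &out_size,
 *				&retval, &duration);
 */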

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

/* Despite the parameter name, any BPF object fd (program or map) is accepted. */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}
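
/*
 * Illustrative sketch (not part of the original file): walking all loaded
 * programs on the system with the id/fd/info helpers above.
 *
 *	struct bpf_prog_info info;
 *	__u32 id = 0, info_len;
 *	int fd;
 *
 *	while (bpf_prog_get_next_id(id, &id) == 0) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		memset(&info, 0, sizeof(info));
 *		info_len = sizeof(info);
 *		if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
 *			... info.type, info.id, info.tag are now valid ...
 *		close(fd);
 *	}
 */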