// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

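/*
 * The map is a circular buffer over 'elements' with one slot always left
 * unused: 'head' is the next slot to be written, 'tail' the next slot to
 * be consumed. head == tail means empty; advancing head into tail would
 * mean full. This is why 'size' is max_entries + 1.
 */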
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

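/*
 * Full when advancing head (with wraparound) would collide with tail; the
 * slot reserved by sizing the buffer at max_entries + 1 keeps this
 * condition distinct from the empty case above.
 */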
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

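/*
 * The size computation below is done in u64: max_entries and value_size
 * are both u32, so (max_entries + 1) * value_size can overflow 32 bits
 * but always fits in 64.
 */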
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

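/*
 * Queue (FIFO) read path: peek/pop consume the oldest element, at 'tail'.
 * In NMI context the lock is only tried, never spun on, since the NMI may
 * have interrupted a CPU that already holds it and spinning would
 * deadlock; callers get -EBUSY instead.
 */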
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

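/*
 * Stack (LIFO) read path: peek/pop operate on the most recently pushed
 * element, one slot behind 'head', wrapping back to size - 1 when head is
 * 0. Popping only moves 'head' back; the data is left in place to be
 * overwritten by a later push.
 */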
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

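/*
 * Push is shared by both map types: new elements always go in at 'head'.
 * The FIFO/LIFO behaviour comes entirely from where the read path consumes
 * (tail for queues, head - 1 for stacks). With BPF_EXIST a full map acts
 * as a ring: the oldest element is dropped to make room.
 */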
/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, irq_flags);
	}

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

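/*
 * Queue and stack maps are keyless: the generic lookup/update/delete and
 * get_next_key operations make no sense here and are rejected, leaving
 * push/pop/peek as the only way to access elements.
 */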
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

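/*
 * Illustrative usage sketch (an assumption about typical callers, not part
 * of this file): from a BPF program these ops are reached through the
 * bpf_map_push_elem(), bpf_map_pop_elem() and bpf_map_peek_elem() helpers;
 * from user space, push maps onto BPF_MAP_UPDATE_ELEM and pop onto
 * BPF_MAP_LOOKUP_AND_DELETE_ELEM, with a NULL key since these maps are
 * keyless. Roughly, assuming libbpf >= 0.7 for bpf_map_create():
 *
 *	__u64 val = 42, out;
 *	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, sizeof(val),
 *				16, NULL);
 *
 *	bpf_map_update_elem(fd, NULL, &val, BPF_ANY);      // push
 *	bpf_map_lookup_and_delete_elem(fd, NULL, &out);    // pop
 */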