// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

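/* Flags accepted at map creation: NUMA node selection and the
 * read-only/write-only access flags covered by BPF_F_ACCESS_MASK.
 */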
#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

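/* Queue and stack maps share one representation: a circular buffer of
 * fixed-size elements guarded by a raw spinlock. One slot is deliberately
 * left unused so that head == tail unambiguously means "empty" (hence
 * size = max_entries + 1).
 */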
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

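/* map is embedded as the first member, so container_of() recovers the
 * enclosing bpf_queue_stack from the generic bpf_map pointer.
 */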
static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

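/* Empty: producer and consumer indices coincide. Called with qs->lock held. */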
static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

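/* Full: advancing head (with wrap-around) would collide with tail, i.e.
 * only the deliberately unused slot remains.
 */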
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

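/* Map creation requires bpf_capable(), i.e. CAP_BPF or CAP_SYS_ADMIN. */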
/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

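/* Charge the whole buffer against the map memory accounting first; only
 * if that succeeds is the backing area actually allocated.
 */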
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem = {0};
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	bpf_map_charge_move(&qs->map.memory, &mem);
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
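/* Elements are stored inline in the map area, so freeing the area
 * releases everything at once.
 */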
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

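/* Common queue (FIFO) read path: copy out the element at tail and, on
 * pop (delete == true), advance tail past it.
 */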
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

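	/* In NMI context we must not wait on the lock: the NMI may have
	 * interrupted the current lock holder, so only trylock and report
	 * -EBUSY on contention.
	 */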
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

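	/* pop consumes the element by advancing tail with wrap-around */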
	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

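/* Common stack (LIFO) read path: copy out the most recently pushed
 * element (the slot just below head) and, on pop, move head back onto it.
 */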
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

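	/* head == 0 makes head - 1 underflow to a huge u32, which the
	 * >= qs->size check catches and wraps to the last slot.
	 */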
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, irq_flags);
	}

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

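	/* write the new element at head, then advance head with wrap-around */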
	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

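/* Queue/stack maps are keyless, so the generic lookup/update/delete and
 * key iteration operations below are rejected; only push/pop/peek are
 * meaningful.
 */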
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

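/* FIFO semantics: pop/peek operate on the tail (oldest element). */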
static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &queue_map_btf_id,
};

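/* LIFO semantics: pop/peek operate on the slot just below head (newest
 * element); push is shared with the queue.
 */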
static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &stack_map_btf_id,
};