/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * There are three possible paths into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a read-only lookup into
 * the netdev_map array from an RCU critical section, we use call_rcu() to wait
 * for an RCU grace period before freeing the old data structures. This ensures
 * the datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be destroyed
 * until all bits are cleared, indicating that the outstanding flush operations
 * have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap's point of view, BPF
 * programs calling into these operations behave the same as multiple user
 * space threads making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device and remove them. This is a two-step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether the ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
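
/* Illustrative sketch, not part of this file: a minimal XDP program using a
 * devmap via bpf_redirect_map(). The map name, section name and entry count
 * are hypothetical; only the map type, the 4-byte key/value sizes and the
 * helper call reflect what this file implements.
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(u32),	// slot index into the map
 *		.value_size	= sizeof(u32),	// ifindex of the target netdev
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		u32 key = 0;	// always redirect via slot 0 in this sketch
 *
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 */
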
#include <linux/bpf.h>
#include <linux/filter.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_dtab_netdev {
	struct net_device *dev;
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	dtab->map.map_type = attr->map_type;
	dtab->map.key_size = attr->key_size;
	dtab->map.value_size = attr->value_size;
	dtab->map.max_entries = attr->max_entries;
	dtab->map.map_flags = attr->map_flags;
	dtab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) have
	 * been disconnected from events. Wait for outstanding critical sections
	 * in these programs to complete. The rcu critical section only
	 * guarantees no further reads against netdev_map. It does __not__
	 * ensure pending flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* Make sure prior calls to __dev_map_entry_free() have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to show all flush_needed bits as zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we wait for the ctx bitmap
 * to be zeroed before completing, to ensure all flush operations have
 * completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);
		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}

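/* Illustrative sketch of the driver-side contract described above
 * __dev_map_flush(); the driver internals here are hypothetical, only
 * xdp_do_redirect() and xdp_do_flush_map() are real entry points.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = 0;
 *
 *		while (work_done < budget) {
 *			// driver-specific: build an xdp_buff for the frame,
 *			// run the attached program, act on the verdict
 *			u32 act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *
 *			if (act == XDP_REDIRECT)
 *				xdp_do_redirect(netdev, &xdp, xdp_prog);
 *			work_done++;
 *		}
 *
 *		// Must run before poll() returns so pending bpf_redirect_map()
 *		// frames reach ndo_xdp_flush() via __dev_map_flush().
 *		xdp_do_flush_map();
 *
 *		return work_done;
 *	}
 */
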
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put() won't happen until
 * after the ifindex has been read.
 */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev;

	if (key >= map->max_entries)
		return NULL;

	dev = READ_ONCE(dtab->netdev_map[key]);
	return dev ? dev->dev : NULL;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed; this does not guarantee a flush has happened yet,
	 * because driver side rcu_read_lock/unlock only protects the
	 * running XDP program. However, for pending flush operations the
	 * dev and ctx are stored in another per-cpu structure, and
	 * additionally the driver tear down ensures all soft irqs are
	 * complete before the net device is removed once dev_put() drops
	 * the reference count to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
				   map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

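/* Illustrative user space sketch of the update semantics above; map_fd is
 * assumed to refer to a BPF_MAP_TYPE_DEVMAP created elsewhere, and the
 * interface name is made up. The key is a slot index, the value an ifindex,
 * and writing ifindex 0 clears the slot.
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth0");
 *
 *	// populate slot 0 with the target device
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 *
 *	// writing ifindex 0 releases the reference and clears the slot
 *	ifindex = 0;
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 */
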
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed both because
		 * dev_map_list is an RCU list and to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);