// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

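/* RCU callback: return the mem.id to the IDA and free the tracking struct */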
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

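/* Remove every rhashtable entry that points at @allocator; the walk is
 * restarted when the rhashtable reports -EAGAIN (resize during iteration).
 */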
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

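/* Unregister a memory model: clear the xdp_mem_info and, for
 * MEM_TYPE_PAGE_POOL, start tear-down of the associated page_pool.
 */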
void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

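/* Unregister an RX queue info structure together with its memory model */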
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

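/* Lazily allocate and init the mem.id -> allocator rhashtable.
 * Called with mem_id_lock held (see __xdp_reg_mem_model()).
 */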
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

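/* Register a memory model for @mem. For allocator-backed types an ID is
 * allocated and the allocator is inserted into the mem.id lookup table.
 * Returns the tracking struct, NULL when no allocator is involved, or an
 * ERR_PTR() on failure.
 */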
static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

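/* Register the memory model for an RX queue that has already been
 * registered with xdp_rxq_info_reg().
 */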
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

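/* Convert a zero-copy (MEM_TYPE_XSK_BUFF_POOL) xdp_buff into a standalone
 * xdp_frame backed by a freshly allocated order-0 page, then free the
 * original xsk buffer.
 */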
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

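/* Build an SKB around the memory backing @xdpf, using a caller-allocated
 * @skb (see xdp_build_skb_from_frame() for the allocating wrapper).
 */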
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	unsigned int headroom, frame_size;
	void *hard_start;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

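/* Copy the xdp_frame struct, its remaining headroom and its packet data
 * into a freshly allocated order-0 page.
 */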
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}