/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * Description: skb functions.
 * Author:
 * Create: 2022-04-07
 */

#ifndef LITEOS_SKBUFF_H
#define LITEOS_SKBUFF_H

/*****************************************************************************
    1 Other header files included
*****************************************************************************/
#include <stdlib.h>
#ifndef FREERTOS_DEFINE
#include <linux/spinlock.h>
#endif
#include "osal_adapt.h"
#include "td_type.h"
#include "td_base.h"
#include "osal_types.h"
#ifdef _PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF
#include "oal_net_pkt_rom.h"
#endif

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif

/*****************************************************************************
    2 Macro definitions
*****************************************************************************/
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE           0
#define CHECKSUM_UNNECESSARY    1
#define CHECKSUM_COMPLETE       2
#define CHECKSUM_PARTIAL        3

#define L1_CACHE_BYTES          (1 << 5)
#define SMP_CACHE_BYTES         L1_CACHE_BYTES
#define skb_data_align(x)       (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))

/* return minimum truesize of one skb containing X bytes of data */
#define skb_truesize(x)         ((x) + skb_data_align(sizeof(struct sk_buff)))
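
/*
 * Illustration (not part of the API): with the 32-byte cache line defined
 * above, skb_data_align() rounds a size up to the next multiple of
 * SMP_CACHE_BYTES, e.g.
 *
 *     skb_data_align(100) -> 128
 *     skb_data_align(128) -> 128
 *     skb_truesize(x)     -> x + cache-aligned size of struct sk_buff itself
 */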

#ifndef NET_SKB_PAD
#define NET_SKB_PAD     80
#endif

#define NUMA_NO_NODE    (-1)

#define USB_CACHE_ALIGN_SIZE 32

#define SKB_ALLOC_FCLONE    0x01
#define SKB_ALLOC_RX        0x02

#ifndef OFFSETOF
#ifdef HAVE_PCLINT_CHECK
#define oal_offsetof(type, member) 0
#else
#define oal_offsetof(type, member) ((long) &((type *) 0)->member)
#endif
#endif

typedef td_u32 gfp_t;
typedef td_u32 sk_buff_data_t;

/*****************************************************************************
    3 Structure definitions
*****************************************************************************/
struct sk_buff_head {
    /* These two members must be first. */
    struct sk_buff  *next;
    struct sk_buff  *prev;

    td_u32          qlen;
    osal_spinlock   lock;
};

struct sk_buff {
    /* These two members must be first. */
    struct sk_buff *next;
    struct sk_buff *prev;
#ifdef _PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF
    oal_dmac_netbuf_stru *pkt_buf;
#endif
    td_void        *dev;               /* for hwal_netif_rx */
    td_u32          len;
    td_u32          data_len;
    td_u16          queue_mapping;

    /* These elements must be at the end, see alloc_skb() for details. */
    sk_buff_data_t  tail;
    sk_buff_data_t  end;

    td_s8           cb[48];  /* 48: SIZE(0..48) */
    td_u8          *head;
    td_u8          *data;

    td_u32          truesize;
    td_u32          priority;
    osal_atomic     users;

    /* used for lwip_pbuf zero copy: actual start address of the memory block */
    td_u8          *mem_head;
    td_u32          protocol;

    td_u16          mac_header;
    td_u8           resv2;
    td_u8           resv;
#ifdef TIMESTAMP_RECORD_DEBUG
    td_u32          times[19]; /* timestamps for debugging */
#endif
};
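
/*
 * Buffer layout (illustrative): head <= data <= skb_tail_pointer() <=
 * skb_end_pointer(), with tail and end stored as offsets from head:
 *
 *     head            data           skb_tail_pointer()   skb_end_pointer()
 *      |-- headroom ---|-- data (len) --|------ tailroom ------|
 */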

typedef struct sk_buff oal_netbuf_stru;
typedef struct sk_buff_head oal_netbuf_head_stru;

/*****************************************************************************
    4 Function declarations
*****************************************************************************/
td_void skb_trim(struct sk_buff *skb, td_u32 len);
struct sk_buff *skb_unshare(struct sk_buff *skb, td_u32 pri);
td_s32 pskb_expand_head(struct sk_buff *skb, td_u32 nhead, td_u32 ntail, td_s32 gfp_mask);
struct sk_buff *alloc_skb(td_u32 size);
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
td_void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
td_void dev_kfree_skb(struct sk_buff *skb);
struct sk_buff *dev_alloc_skb(td_u32 length);
td_u8 *skb_put(struct sk_buff *skb, td_u32 len);
#define dev_kfree_skb_any(a) dev_kfree_skb(a)

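/*
 * Typical allocation flow (a minimal sketch; dev_alloc_skb() is expected to
 * reserve NET_SKB_PAD bytes of headroom, as _dev_alloc_skb() below does):
 *
 *     struct sk_buff *skb = dev_alloc_skb(128);
 *     if (skb != NULL) {
 *         td_u8 *payload = skb_put(skb, 64);  // grow the data area by 64 bytes
 *         // ... fill payload ...
 *         dev_kfree_skb(skb);                 // release the buffer
 *     }
 */
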
/*****************************************************************************
    5 Inline functions
*****************************************************************************/
static inline td_void _skb_queue_head_init(struct sk_buff_head *list)
{
    list->prev = list->next = (struct sk_buff *)(uintptr_t)list;
    list->qlen = 0;
}

static inline td_void skb_queue_head_init(struct sk_buff_head *list)
{
#ifndef FREERTOS_DEFINE
    /* LiteOS list operations do not use osal_spin_lock_init, because there is
     * no guaranteed path for releasing the memory allocated for the lock. */
    spin_lock_init(&list->lock);
#else
    /* On other systems the init function does not allocate memory for the lock. */
    osal_spin_lock_init(&list->lock);
#endif
    _skb_queue_head_init(list);
}
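
/*
 * Queue usage sketch: the head acts as its own sentinel, so an empty queue
 * points next/prev back at itself.
 *
 *     struct sk_buff_head txq;
 *     skb_queue_head_init(&txq);                 // lock + empty circular list
 *     skb_queue_tail(&txq, skb);                 // append
 *     struct sk_buff *first = skb_dequeue(&txq); // NULL when empty
 */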

static inline td_void skb_reset_tail_pointer(struct sk_buff *skb)
{
    skb->tail = (sk_buff_data_t)(skb->data - skb->head);
}

static inline td_u8 *skb_tail_pointer(const struct sk_buff *skb)
{
    td_u8 *phead = skb->head;
    return (phead + skb->tail);
}

static inline td_s32 skb_queue_empty(const struct sk_buff_head *list)
{
    return list->next == (struct sk_buff *)(uintptr_t)list;
}

static inline td_void skb_reserve(struct sk_buff *skb, td_u32 len)
{
    skb->data += len;
    skb->tail += len;
}

static inline struct sk_buff *_dev_alloc_skb(td_u32 length)
{
    struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD);
    if (skb != NULL) {
        skb_reserve(skb, NET_SKB_PAD);
    }
    return skb;
}

static inline td_void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
    struct sk_buff *next = NULL;
    struct sk_buff *prev = NULL;

    list->qlen--;
    next       = skb->next;
    prev       = skb->prev;
    skb->next  = skb->prev = NULL;
    next->prev = prev;
    prev->next = next;
}

static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
    osal_adapt_atomic_inc(&skb->users);
    return skb;
}

static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
    struct sk_buff *list = ((const struct sk_buff *)(uintptr_t)list_)->next;
    if (list == (struct sk_buff *)(uintptr_t)list_) {
        list = NULL;
    }
    return list;
}

static inline struct sk_buff *skb_peek_next(const struct sk_buff *skb, const struct sk_buff_head *list_)
{
    struct sk_buff *next = skb->next;

    if (next == (struct sk_buff *)(uintptr_t)list_) {
        next = NULL;
    }
    return next;
}

static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
    struct sk_buff *skb = list_->prev;

    if (skb == (struct sk_buff *)(uintptr_t)list_) {
        skb = NULL;
    }
    return skb;
}
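
/*
 * Peek-based iteration sketch (the queue must not be modified concurrently):
 *
 *     struct sk_buff *it;
 *     for (it = skb_peek(&txq); it != NULL; it = skb_peek_next(it, &txq)) {
 *         // inspect it without unlinking
 *     }
 */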

static inline struct sk_buff *_skb_dequeue(struct sk_buff_head *list)
{
    struct sk_buff *skb = skb_peek(list);
    if (skb) {
        skb_unlink(skb, list);
    }
    return skb;
}

static inline struct sk_buff *_skb_dequeue_tail(struct sk_buff_head *list)
{
    struct sk_buff *skb = skb_peek_tail(list);
    if (skb) {
        skb_unlink(skb, list);
    }
    return skb;
}
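
/*
 * Drain sketch using the leading-underscore helpers; by the usual naming
 * convention these are unlocked, so the caller is assumed to serialize access
 * to the queue:
 *
 *     struct sk_buff *skb;
 *     while ((skb = _skb_dequeue(&txq)) != NULL) {
 *         dev_kfree_skb(skb);
 *     }
 */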

static inline td_s32 skb_headlen(const struct sk_buff *skb)
{
    return (td_s32)(skb->len - skb->data_len);
}

static inline td_void _skb_insert(struct sk_buff *newsk,
                                  struct sk_buff *prev, struct sk_buff *next,
                                  struct sk_buff_head *list)
{
    newsk->next = next;
    newsk->prev = prev;
    next->prev  = prev->next = newsk;
    list->qlen++;
}

static inline td_void _skb_queue_before(struct sk_buff_head *list, struct sk_buff *next, struct sk_buff *newsk)
{
    _skb_insert(newsk, next->prev, next, list);
}

static inline td_void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
    _skb_queue_before(list, (struct sk_buff *)(uintptr_t)list, newsk);
}

static inline td_void _skb_queue_splice(const struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *next)
{
    struct sk_buff *first = list->next;
    struct sk_buff *last = list->prev;

    first->prev = prev;
    prev->next = first;

    last->next = next;
    next->prev = last;
}

static inline td_void skb_queue_splice(const struct sk_buff_head *list, struct sk_buff_head *head)
{
    if (skb_queue_empty(list) == 0) {
        _skb_queue_splice(list, (struct sk_buff *) head, head->next);
        head->qlen += list->qlen;
    }
}

static inline td_void skb_queue_splice_tail_init(struct sk_buff_head *list, struct sk_buff_head *head)
{
    if (skb_queue_empty(list) == 0) {
        _skb_queue_splice(list, head->prev, (struct sk_buff *) head);
        head->qlen += list->qlen;
        _skb_queue_head_init(list);
    }
}

static inline td_void skb_queue_splice_init(struct sk_buff_head *list, struct sk_buff_head *head)
{
    if (skb_queue_empty(list) == 0) {
        _skb_queue_splice(list, (struct sk_buff *)(uintptr_t)head, head->next);
        head->qlen += list->qlen;
        _skb_queue_head_init(list);
    }
}
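
/*
 * Splice sketch: move an entire staging queue in O(1). skb_queue_splice_init()
 * prepends to head and re-initializes the source; the _tail variant appends.
 *
 *     skb_queue_splice_tail_init(&staging, &txq); // staging is empty afterwards
 */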

static inline td_u8 *skb_pull(struct sk_buff *skb, td_u32 len)
{
    skb->len -= len;
    return skb->data += len;
}

static inline td_s32 skb_headroom(const struct sk_buff *skb)
{
    return (td_s32)(skb->data - skb->head);
}

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
    return skb->data_len;
}

static inline td_void skb_set_tail_pointer(struct sk_buff *skb, const td_u32 offset)
{
    skb_reset_tail_pointer(skb);
    skb->tail += offset;
}

static inline td_void _skb_trim(struct sk_buff *skb, td_u32 len)
{
    if (skb_is_nonlinear(skb)) {
        return;
    }
    skb->len = len;
    skb_set_tail_pointer(skb, len);
}

static inline td_u8 *skb_push(struct sk_buff *skb, td_u32 len)
{
    if (skb->data - len < skb->head) {
        return NULL;
    }

    skb->data -= len;
    skb->len  += len;
    return skb->data;
}
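
/*
 * Header manipulation sketch: skb_reserve() opens headroom, skb_push() claims
 * part of it for a header, skb_pull() strips bytes from the front again.
 *
 *     skb_reserve(skb, 16);            // data and tail advance by 16
 *     td_u8 *hdr = skb_push(skb, 8);   // prepend an 8-byte header (NULL if no room)
 *     (void)skb_pull(skb, 8);          // consume the header
 */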

static inline td_u32 skb_tailroom(const struct sk_buff *skb)
{
    return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

static inline bool skb_queue_is_last(const struct sk_buff_head *list, const struct sk_buff *skb)
{
    return skb->next == (struct sk_buff *)list;
}

static inline td_u8 *skb_end_pointer(const struct sk_buff *skb)
{
    return skb->head + skb->end;
}

static inline td_u32 skb_end_offset(const struct sk_buff *skb)
{
    return skb->end;
}

static inline td_void _skb_queue_after(struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *newsk)
{
    _skb_insert(newsk, prev, prev->next, list);
}

static inline td_void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
    _skb_queue_after(list, (struct sk_buff *)(uintptr_t)list, newsk);
}

static inline td_void skb_set_queue_mapping(struct sk_buff *skb, td_u16 queue_mapping)
{
    skb->queue_mapping = queue_mapping;
}

static inline td_u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
    return skb->queue_mapping;
}

static inline td_void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
    to->queue_mapping = from->queue_mapping;
}

static inline td_u32 skb_queue_len(const struct sk_buff_head *list_)
{
    return list_->qlen;
}

#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif

#endif  /* LITEOS_SKBUFF_H */