• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // bindgen-flags: --rustified-enum ".*" --rust-target 1.40
2 
/* Minimal fixed-width typedefs so this header is self-contained
 * (stand-ins for <stdint.h>; widths assume a typical LP64/LLP64 ABI —
 * NOTE(review): confirm against the target platform). */
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;

/* Cache line size, in bytes, used by __rte_cache_aligned below. */
#define RTE_CACHE_LINE_SIZE 64

/**
 * Force alignment
 */
#define __rte_aligned(a) __attribute__((__aligned__(a)))

/**
 * Force alignment to cache line.
 */
#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
19 
#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4

/** Fragment-slot indices and count limits for one fragmented packet. */
enum {
	IP_LAST_FRAG_IDX = 0,   /**< array slot reserved for the last fragment */
	IP_FIRST_FRAG_IDX = 1,  /**< array slot reserved for the first fragment */
	IP_MIN_FRAG_NUM = 2,    /**< fewest fragments a datagram can consist of */
	IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
	/**< maximum number of fragments per packet */
};
29 
/** @internal One received fragment of a datagram under reassembly. */
struct ip_frag {
	uint16_t ofs;          /**< offset into the packet */
	uint16_t len;          /**< length of fragment */
	struct rte_mbuf *mb;   /**< fragment mbuf */
};
36 
/** @internal <src addr, dst addr, id> to uniquely identify fragmented datagram. */
struct ip_frag_key {
	uint64_t src_dst[4];      /**< src address, first 8 bytes used for IPv4 */
	uint32_t id;           /**< datagram id (the <id> of the key tuple above) */
	uint32_t key_len;      /**< src/dst key length */
};
43 
/*
 * Tail queue declarations (BSD <sys/queue.h> style doubly linked tailq).
 * TAILQ_HEAD(name, type) declares the list-head struct: a pointer to the
 * first element plus the address of the last element's next pointer.
 */
#define	TAILQ_HEAD(name, type)						            \
struct name {								                    \
	struct type *tqh_first;	/* first element */			        \
	struct type **tqh_last;	/* addr of last next element */		\
}
52 
53 
/*
 * TAILQ_ENTRY(type) declares the linkage embedded in each list member:
 * a next pointer plus the address of the previous element's next pointer.
 */
#define	TAILQ_ENTRY(type)						                    \
struct {								                            \
	struct type *tqe_next;	/* next element */			            \
	struct type **tqe_prev;	/* address of previous next element */	\
}
59 
/**
 * @internal Fragmented packet to reassemble.
 * First two entries in the frags[] array are for the last and first fragments
 * (see IP_LAST_FRAG_IDX / IP_FIRST_FRAG_IDX).
 */
struct ip_frag_pkt {
	TAILQ_ENTRY(ip_frag_pkt) lru;   /**< LRU list linkage */
	struct ip_frag_key key;           /**< fragmentation key */
	uint64_t             start;       /**< creation timestamp */
	uint32_t             total_size;  /**< expected reassembled size */
	uint32_t             frag_size;   /**< size of fragments received */
	uint32_t             last_idx;    /**< index of next entry to fill */
	struct ip_frag       frags[IP_MAX_FRAG_NUM]; /**< fragments */
} __rte_cache_aligned;
73 
TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal tailq of in-progress fragmented packets */
75 
/** fragmentation table statistics (counters only; cache-line aligned) */
struct ip_frag_tbl_stat {
	uint64_t find_num;      /**< total # of find/insert attempts. */
	uint64_t add_num;       /**< # of add ops. */
	uint64_t del_num;       /**< # of del ops. */
	uint64_t reuse_num;     /**< # of reuse (del/add) ops. */
	uint64_t fail_total;    /**< total # of add failures. */
	uint64_t fail_nospace;  /**< # of 'no space' add failures. */
} __rte_cache_aligned;
85 
/** fragmentation table */
struct rte_ip_frag_tbl {
	uint64_t             max_cycles;      /**< ttl for table entries. */
	uint32_t             entry_mask;      /**< hash value mask. */
	uint32_t             max_entries;     /**< max entries allowed. */
	uint32_t             use_entries;     /**< entries in use. */
	uint32_t             bucket_entries;  /**< hash associativity. */
	uint32_t             nb_entries;      /**< total size of the table. */
	uint32_t             nb_buckets;      /**< num of associativity lines. */
	struct ip_frag_pkt *last;         /**< last used entry. */
	struct ip_pkt_list lru;           /**< LRU list for table entries. */
	struct ip_frag_tbl_stat stat;     /**< statistics counters. */
	/* Zero-length trailing array (GNU extension, pre-C99 flexible array
	 * member): the hash buckets are allocated contiguously after the
	 * struct itself. */
	__extension__ struct ip_frag_pkt pkt[0]; /**< hash table. */
};
100