#ifndef _NF_FLOW_TABLE_H
#define _NF_FLOW_TABLE_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/rhashtable-types.h>
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/flow_offload.h>
#include <net/dst.h>

struct nf_flowtable;
struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;

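/*
 * Flow match key in flow dissector layout. The same structure is used for
 * both key and mask in struct nf_flow_match; it is long-aligned (see the
 * trailing attribute) so key/mask comparisons can be done as longs.
 */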
struct nf_flow_key {
	struct flow_dissector_key_meta		meta;
	struct flow_dissector_key_control	control;
	struct flow_dissector_key_control	enc_control;
	struct flow_dissector_key_basic		basic;
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_keyid		enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs	enc_ipv4;
		struct flow_dissector_key_ipv6_addrs	enc_ipv6;
	};
	struct flow_dissector_key_tcp		tcp;
	struct flow_dissector_key_ports		tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

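/* Key/mask pair plus the flow dissector describing which fields are used. */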
struct nf_flow_match {
	struct flow_dissector	dissector;
	struct nf_flow_key	key;
	struct nf_flow_key	mask;
};

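/* Match plus the action list (struct flow_rule) handed to the offload driver. */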
struct nf_flow_rule {
	struct nf_flow_match	match;
	struct flow_rule	*rule;
};

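/*
 * Per-family flowtable backend: constructor/destructor, hardware offload
 * setup, rule translation (->action) and the hook run on packets in the
 * fast path.
 */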
struct nf_flowtable_type {
	struct list_head		list;
	int				family;
	int				(*init)(struct nf_flowtable *ft);
	int				(*setup)(struct nf_flowtable *ft,
						 struct net_device *dev,
						 enum flow_block_command cmd);
	int				(*action)(struct net *net,
						  const struct flow_offload *flow,
						  enum flow_offload_tuple_dir dir,
						  struct nf_flow_rule *flow_rule);
	void				(*free)(struct nf_flowtable *ft);
	nf_hookfn			*hook;
	struct module			*owner;
};

enum nf_flowtable_flags {
	NF_FLOWTABLE_HW_OFFLOAD	= 0x1,	/* NFT_FLOWTABLE_HW_OFFLOAD */
	NF_FLOWTABLE_COUNTER	= 0x2,	/* NFT_FLOWTABLE_COUNTER */
};

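/*
 * A flowtable instance: an rhashtable of offloaded tuples, periodic garbage
 * collection work and the flow_block carrying driver callbacks for hardware
 * offload, guarded by flow_block_lock.
 */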
struct nf_flowtable {
	struct list_head		list;
	struct rhashtable		rhashtable;
	int				priority;
	const struct nf_flowtable_type	*type;
	struct delayed_work		gc_work;
	unsigned int			flags;
	struct flow_block		flow_block;
	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
	possible_net_t			net;
};

static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
{
	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
}

enum flow_offload_tuple_dir {
	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
	FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
};

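/*
 * One direction of a flow: addresses and ports, input interface index,
 * layer 3/4 protocols, path MTU and the cached output route.
 */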
struct flow_offload_tuple {
	union {
		struct in_addr		src_v4;
		struct in6_addr		src_v6;
	};
	union {
		struct in_addr		dst_v4;
		struct in6_addr		dst_v6;
	};
	struct {
		__be16			src_port;
		__be16			dst_port;
	};

	int				iifidx;

	u8				l3proto;
	u8				l4proto;
	u8				dir;

	u16				mtu;

	struct dst_entry		*dst_cache;
};

struct flow_offload_tuple_rhash {
	struct rhash_head		node;
	struct flow_offload_tuple	tuple;
};

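/*
 * Per-flow state bits: NAT in either direction, teardown in progress and
 * the hardware offload life cycle (NF_FLOW_HW_*).
 */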
enum nf_flow_flags {
	NF_FLOW_SNAT,
	NF_FLOW_DNAT,
	NF_FLOW_TEARDOWN,
	NF_FLOW_HW,
	NF_FLOW_HW_DYING,
	NF_FLOW_HW_DEAD,
	NF_FLOW_HW_PENDING,
};

enum flow_offload_type {
	NF_FLOW_OFFLOAD_UNSPEC	= 0,
	NF_FLOW_OFFLOAD_ROUTE,
};

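/*
 * A bidirectional flow entry: one tuple per direction, the conntrack entry
 * it was created from, state flags, offload type and expiry time in jiffies.
 */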
struct flow_offload {
	struct flow_offload_tuple_rhash		tuplehash[FLOW_OFFLOAD_DIR_MAX];
	struct nf_conn				*ct;
	unsigned long				flags;
	u16					type;
	u32					timeout;
	struct rcu_head				rcu_head;
};

#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp	(u32)jiffies

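/*
 * Remaining lifetime of a flow in jiffies relative to the current time;
 * zero or negative means the flow has expired.
 */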
static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
	return (__s32)(timeout - nf_flowtable_time_stamp);
}

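/* Routes for both directions, consumed by flow_offload_route_init(). */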
struct nf_flow_route {
	struct {
		struct dst_entry	*dst;
	} tuple[FLOW_OFFLOAD_DIR_MAX];
};

struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);

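/*
 * Register a driver flow_block callback on this flowtable. Returns -EEXIST
 * if the same callback/private data pair is already registered, or the
 * error from flow_block_cb_alloc() on allocation failure.
 *
 * Illustrative use from a driver (names are placeholders, not a real API):
 *
 *	err = nf_flow_table_offload_add_cb(flowtable, my_setup_cb, my_priv);
 *	if (err)
 *		return err;
 */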
static inline int
nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;
	int err = 0;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		err = -EEXIST;
		goto unlock;
	}

	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
	if (IS_ERR(block_cb)) {
		err = PTR_ERR(block_cb);
		goto unlock;
	}

	list_add_tail(&block_cb->list, &block->cb_list);

unlock:
	up_write(&flow_table->flow_block_lock);
	return err;
}

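/*
 * Unregister a callback previously added with nf_flow_table_offload_add_cb();
 * warns if the callback was not registered.
 */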
static inline void
nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	} else {
		WARN_ON(true);
	}
	up_write(&flow_table->flow_block_lock);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route);

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow);

struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
						     struct flow_offload_tuple *tuple);
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);

int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);

void flow_offload_teardown(struct flow_offload *flow);

int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir);
int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir);

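/* Source and destination ports as laid out at the start of the TCP/UDP header. */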
struct flow_ports {
	__be16 source, dest;
};

unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
				       const struct nf_hook_state *state);

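/*
 * Module alias so the flowtable backend for a given address family can be
 * autoloaded, e.g. MODULE_ALIAS_NF_FLOWTABLE(AF_INET) in an IPv4 backend.
 */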
#define MODULE_ALIAS_NF_FLOWTABLE(family)	\
	MODULE_ALIAS("nf-flowtable-" __stringify(family))

void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_del(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
			   struct flow_offload *flow);

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd);
int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);
int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);

int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);

#endif /* _NF_FLOW_TABLE_H */