// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

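/* Copy one direction of the conntrack tuple into the flow tuple. The
 * ports are read through the tcp union member, which shares its layout
 * with the udp member, so this works for both supported protocols.
 */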
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}

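/* Allocate a flow entry for @ct, taking a reference on the conntrack
 * entry. Returns NULL if the conntrack is dying or its refcount has
 * already dropped to zero.
 */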
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

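/* Cache the route for one direction: take a reference on the dst entry,
 * record the path MTU and remember the input interface, which is the
 * output device of the opposite direction.
 */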
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *other_dst = route->tuple[!dir].dst;
	struct dst_entry *dst = route->tuple[dir].dst;

	if (!dst_hold_safe(route->tuple[dir].dst))
		return -1;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	flow_tuple->iifidx = other_dst->dev->ifindex;
	flow_tuple->dst_cache = dst;

	return 0;
}

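/* Fill in the cached routes for both directions and mark the flow as a
 * route-type entry. On failure, the reference taken for the original
 * direction is dropped again.
 */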
int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

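/* When a flow is handed back to conntrack, clamp the conntrack timeout
 * to the pickup value so that classic state tracking resumes with a
 * sane remaining lifetime.
 */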
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
}

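/* Release a flow entry: drop the cached routes for route-type flows,
 * put the conntrack reference and free the object after an RCU grace
 * period.
 */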
void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

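/* rhashtable callbacks: both directions of a flow are hashed over the
 * tuple fields up to (but not including) 'dir', so lookup and compare
 * only consider the address/port/protocol part of the key.
 */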
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

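/* Insert both tuple directions into the flow table. If the reply
 * direction cannot be inserted, the original direction is removed again
 * so the table is never left with a half-inserted flow. On success the
 * conntrack timeout is extended and, if the table supports it, the flow
 * is pushed to hardware.
 */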
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

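/* Refresh the software timeout on packet activity; for tables with
 * hardware offload enabled, also re-announce the flow to the driver.
 */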
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

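/* Unlink both directions from the flow table and hand the connection
 * back to conntrack, fixing up its state and timeout before the entry
 * is freed.
 */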
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

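/* Mark a flow for removal; the garbage collector performs the actual
 * teardown, typically after a TCP FIN or RST is seen on the fast path.
 */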
void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

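/* Look up a flow table entry by tuple. Returns NULL for flows that are
 * being torn down or whose conntrack entry is dying, so the caller falls
 * back to the classic forwarding path.
 */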
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

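/* Walk all flows in the table, invoking @iter once per flow. Each flow
 * is hashed twice (once per direction), so entries with a non-zero
 * direction are skipped to avoid visiting the same flow twice.
 */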
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

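/* Garbage collector step: expired or dying flows are marked for teardown.
 * Hardware-offloaded flows are first removed from the hardware and only
 * freed once the driver reports them dead; software flows are deleted
 * right away. Live hardware flows get their counters synced instead.
 */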
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

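/* Periodic garbage collection, rescheduled once per second. */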
static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

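/* Port NAT checksum helpers: the caller has already written the new port
 * into the header, these only adjust the transport checksum to match.
 */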
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);

	return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct udphdr *udph;

	if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			    u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

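/* Source port NAT: in the original direction, rewrite the source port to
 * the reply tuple's destination port; in the reply direction, rewrite the
 * destination port back. nf_flow_dnat_port() below is the mirror image
 * for destination port NAT.
 */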
int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

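/* Initialise a flow table: set up the rhashtable and the flow block,
 * kick off the periodic garbage collector and register the table on the
 * global list used by device cleanup.
 */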
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

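/* Tear down flows bound to @dev, or all flows if @dev is NULL. */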
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

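/* Flush all flows using @dev from @flowtable, waiting for pending
 * garbage collection and hardware offload work to finish.
 */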
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

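/* Destroy a flow table: unregister it, stop the garbage collector, tear
 * down all remaining flows (running the GC step a second time on
 * hardware-offload tables so driver-side removals can complete) and free
 * the hash table.
 */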
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");