/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/siphash.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

#include "nf_internals.h"

MODULE_LICENSE("GPL");
60
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
62 const struct nf_conntrack_tuple *tuple,
63 const struct nf_conntrack_l4proto *l4proto)
64 {
65 int ret = 0;
66 struct nlattr *nest_parms;
67
68 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
69 if (!nest_parms)
70 goto nla_put_failure;
71 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
72 goto nla_put_failure;
73
74 if (likely(l4proto->tuple_to_nlattr))
75 ret = l4proto->tuple_to_nlattr(skb, tuple);
76
77 nla_nest_end(skb, nest_parms);
78
79 return ret;
80
81 nla_put_failure:
82 return -1;
83 }
84
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
86 const struct nf_conntrack_tuple *tuple)
87 {
88 if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
89 nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
90 return -EMSGSIZE;
91 return 0;
92 }
93
static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
95 const struct nf_conntrack_tuple *tuple)
96 {
97 if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
98 nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
99 return -EMSGSIZE;
100 return 0;
101 }
102
static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
104 const struct nf_conntrack_tuple *tuple)
105 {
106 int ret = 0;
107 struct nlattr *nest_parms;
108
109 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
110 if (!nest_parms)
111 goto nla_put_failure;
112
113 switch (tuple->src.l3num) {
114 case NFPROTO_IPV4:
115 ret = ipv4_tuple_to_nlattr(skb, tuple);
116 break;
117 case NFPROTO_IPV6:
118 ret = ipv6_tuple_to_nlattr(skb, tuple);
119 break;
120 }
121
122 nla_nest_end(skb, nest_parms);
123
124 return ret;
125
126 nla_put_failure:
127 return -1;
128 }
129
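/* Emit the CTA_TUPLE_IP and CTA_TUPLE_PROTO nests for one conntrack tuple;
 * the l4proto lookup and its tuple_to_nlattr callback run under rcu_read_lock.
 */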
static int ctnetlink_dump_tuples(struct sk_buff *skb,
131 const struct nf_conntrack_tuple *tuple)
132 {
133 const struct nf_conntrack_l4proto *l4proto;
134 int ret;
135
136 rcu_read_lock();
137 ret = ctnetlink_dump_tuples_ip(skb, tuple);
138
139 if (ret >= 0) {
140 l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
141 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
142 }
143 rcu_read_unlock();
144 return ret;
145 }
146
static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
148 const struct nf_conntrack_zone *zone, int dir)
149 {
150 if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
151 return 0;
152 if (nla_put_be16(skb, attrtype, htons(zone->id)))
153 goto nla_put_failure;
154 return 0;
155
156 nla_put_failure:
157 return -1;
158 }
159
static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
161 {
162 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
163 goto nla_put_failure;
164 return 0;
165
166 nla_put_failure:
167 return -1;
168 }
169
static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
171 {
172 long timeout = nf_ct_expires(ct) / HZ;
173
174 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
175 goto nla_put_failure;
176 return 0;
177
178 nla_put_failure:
179 return -1;
180 }
181
static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
183 {
184 const struct nf_conntrack_l4proto *l4proto;
185 struct nlattr *nest_proto;
186 int ret;
187
188 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
189 if (!l4proto->to_nlattr)
190 return 0;
191
192 nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
193 if (!nest_proto)
194 goto nla_put_failure;
195
196 ret = l4proto->to_nlattr(skb, nest_proto, ct);
197
198 nla_nest_end(skb, nest_proto);
199
200 return ret;
201
202 nla_put_failure:
203 return -1;
204 }
205
static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
207 const struct nf_conn *ct)
208 {
209 struct nlattr *nest_helper;
210 const struct nf_conn_help *help = nfct_help(ct);
211 struct nf_conntrack_helper *helper;
212
213 if (!help)
214 return 0;
215
216 rcu_read_lock();
217 helper = rcu_dereference(help->helper);
218 if (!helper)
219 goto out;
220
221 nest_helper = nla_nest_start(skb, CTA_HELP);
222 if (!nest_helper)
223 goto nla_put_failure;
224 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
225 goto nla_put_failure;
226
227 if (helper->to_nlattr)
228 helper->to_nlattr(skb, ct);
229
230 nla_nest_end(skb, nest_helper);
231 out:
232 rcu_read_unlock();
233 return 0;
234
235 nla_put_failure:
236 rcu_read_unlock();
237 return -1;
238 }
239
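/* Dump one direction's accounting counters. A GET_CTRZERO request reads and
 * atomically resets the packet/byte counters; a plain GET only reads them.
 */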
240 static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
242 enum ip_conntrack_dir dir, int type)
243 {
244 enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
245 struct nf_conn_counter *counter = acct->counter;
246 struct nlattr *nest_count;
247 u64 pkts, bytes;
248
249 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
250 pkts = atomic64_xchg(&counter[dir].packets, 0);
251 bytes = atomic64_xchg(&counter[dir].bytes, 0);
252 } else {
253 pkts = atomic64_read(&counter[dir].packets);
254 bytes = atomic64_read(&counter[dir].bytes);
255 }
256
257 nest_count = nla_nest_start(skb, attr);
258 if (!nest_count)
259 goto nla_put_failure;
260
261 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
262 CTA_COUNTERS_PAD) ||
263 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
264 CTA_COUNTERS_PAD))
265 goto nla_put_failure;
266
267 nla_nest_end(skb, nest_count);
268
269 return 0;
270
271 nla_put_failure:
272 return -1;
273 }
274
275 static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
277 {
278 struct nf_conn_acct *acct = nf_conn_acct_find(ct);
279
280 if (!acct)
281 return 0;
282
283 if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
284 return -1;
285 if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
286 return -1;
287
288 return 0;
289 }
290
291 static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
293 {
294 struct nlattr *nest_count;
295 const struct nf_conn_tstamp *tstamp;
296
297 tstamp = nf_conn_tstamp_find(ct);
298 if (!tstamp)
299 return 0;
300
301 nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
302 if (!nest_count)
303 goto nla_put_failure;
304
305 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
306 CTA_TIMESTAMP_PAD) ||
307 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
308 cpu_to_be64(tstamp->stop),
309 CTA_TIMESTAMP_PAD)))
310 goto nla_put_failure;
311 nla_nest_end(skb, nest_count);
312
313 return 0;
314
315 nla_put_failure:
316 return -1;
317 }
318
319 #ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
321 {
322 if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
323 goto nla_put_failure;
324 return 0;
325
326 nla_put_failure:
327 return -1;
328 }
329 #else
330 #define ctnetlink_dump_mark(a, b) (0)
331 #endif
332
333 #ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
335 {
336 struct nlattr *nest_secctx;
337 int len, ret;
338 char *secctx;
339
340 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
341 if (ret)
342 return 0;
343
344 ret = -1;
345 nest_secctx = nla_nest_start(skb, CTA_SECCTX);
346 if (!nest_secctx)
347 goto nla_put_failure;
348
349 if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
350 goto nla_put_failure;
351 nla_nest_end(skb, nest_secctx);
352
353 ret = 0;
354 nla_put_failure:
355 security_release_secctx(secctx, len);
356 return ret;
357 }
358 #else
359 #define ctnetlink_dump_secctx(a, b) (0)
360 #endif
361
362 #ifdef CONFIG_NF_CONNTRACK_LABELS
static inline int ctnetlink_label_size(const struct nf_conn *ct)
364 {
365 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
366
367 if (!labels)
368 return 0;
369 return nla_total_size(sizeof(labels->bits));
370 }
371
372 static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
374 {
375 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
376 unsigned int i;
377
378 if (!labels)
379 return 0;
380
381 i = 0;
382 do {
383 if (labels->bits[i] != 0)
384 return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
385 labels->bits);
386 i++;
387 } while (i < ARRAY_SIZE(labels->bits));
388
389 return 0;
390 }
391 #else
392 #define ctnetlink_dump_labels(a, b) (0)
393 #define ctnetlink_label_size(a) (0)
394 #endif
395
396 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
397
static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
399 {
400 struct nlattr *nest_parms;
401
402 if (!(ct->status & IPS_EXPECTED))
403 return 0;
404
405 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
406 if (!nest_parms)
407 goto nla_put_failure;
408 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
409 goto nla_put_failure;
410 nla_nest_end(skb, nest_parms);
411
412 return 0;
413
414 nla_put_failure:
415 return -1;
416 }
417
418 static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
420 {
421 struct nlattr *nest_parms;
422
423 nest_parms = nla_nest_start(skb, type);
424 if (!nest_parms)
425 goto nla_put_failure;
426
427 if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
428 htonl(seq->correction_pos)) ||
429 nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
430 htonl(seq->offset_before)) ||
431 nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
432 htonl(seq->offset_after)))
433 goto nla_put_failure;
434
435 nla_nest_end(skb, nest_parms);
436
437 return 0;
438
439 nla_put_failure:
440 return -1;
441 }
442
static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
444 {
445 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
446 struct nf_ct_seqadj *seq;
447
448 if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
449 return 0;
450
451 spin_lock_bh(&ct->lock);
452 seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
453 if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
454 goto err;
455
456 seq = &seqadj->seq[IP_CT_DIR_REPLY];
457 if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
458 goto err;
459
460 spin_unlock_bh(&ct->lock);
461 return 0;
462 err:
463 spin_unlock_bh(&ct->lock);
464 return -1;
465 }
466
static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
468 {
469 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
470 struct nlattr *nest_parms;
471
472 if (!synproxy)
473 return 0;
474
475 nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
476 if (!nest_parms)
477 goto nla_put_failure;
478
479 if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
480 nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
481 nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
482 goto nla_put_failure;
483
484 nla_nest_end(skb, nest_parms);
485
486 return 0;
487
488 nla_put_failure:
489 return -1;
490 }
491
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
493 {
494 __be32 id = (__force __be32)nf_ct_get_id(ct);
495
496 if (nla_put_be32(skb, CTA_ID, id))
497 goto nla_put_failure;
498 return 0;
499
500 nla_put_failure:
501 return -1;
502 }
503
static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
505 {
506 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
507 goto nla_put_failure;
508 return 0;
509
510 nla_put_failure:
511 return -1;
512 }
513
514 /* all these functions access ct->ext. Caller must either hold a reference
515 * on ct or prevent its deletion by holding either the bucket spinlock or
516 * pcpu dying list lock.
517 */
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
519 struct nf_conn *ct, u32 type)
520 {
521 if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
522 ctnetlink_dump_timestamp(skb, ct) < 0 ||
523 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
524 ctnetlink_dump_labels(skb, ct) < 0 ||
525 ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
526 ctnetlink_dump_ct_synproxy(skb, ct) < 0)
527 return -1;
528
529 return 0;
530 }
531
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
533 {
534 if (ctnetlink_dump_status(skb, ct) < 0 ||
535 ctnetlink_dump_mark(skb, ct) < 0 ||
536 ctnetlink_dump_secctx(skb, ct) < 0 ||
537 ctnetlink_dump_id(skb, ct) < 0 ||
538 ctnetlink_dump_use(skb, ct) < 0 ||
539 ctnetlink_dump_master(skb, ct) < 0)
540 return -1;
541
542 if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
543 (ctnetlink_dump_timeout(skb, ct) < 0 ||
544 ctnetlink_dump_protoinfo(skb, ct) < 0))
545 return -1;
546
547 return 0;
548 }
549
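/* Build one complete conntrack message: both tuples with their zones, the
 * generic attributes and, if extinfo is set, the extension data as well.
 */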
550 static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
552 struct nf_conn *ct, bool extinfo, unsigned int flags)
553 {
554 const struct nf_conntrack_zone *zone;
555 struct nlmsghdr *nlh;
556 struct nfgenmsg *nfmsg;
557 struct nlattr *nest_parms;
558 unsigned int event;
559
560 if (portid)
561 flags |= NLM_F_MULTI;
562 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
563 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
564 if (nlh == NULL)
565 goto nlmsg_failure;
566
567 nfmsg = nlmsg_data(nlh);
568 nfmsg->nfgen_family = nf_ct_l3num(ct);
569 nfmsg->version = NFNETLINK_V0;
570 nfmsg->res_id = 0;
571
572 zone = nf_ct_zone(ct);
573
574 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
575 if (!nest_parms)
576 goto nla_put_failure;
577 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
578 goto nla_put_failure;
579 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
580 NF_CT_ZONE_DIR_ORIG) < 0)
581 goto nla_put_failure;
582 nla_nest_end(skb, nest_parms);
583
584 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
585 if (!nest_parms)
586 goto nla_put_failure;
587 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
588 goto nla_put_failure;
589 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
590 NF_CT_ZONE_DIR_REPL) < 0)
591 goto nla_put_failure;
592 nla_nest_end(skb, nest_parms);
593
594 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
595 NF_CT_DEFAULT_ZONE_DIR) < 0)
596 goto nla_put_failure;
597
598 if (ctnetlink_dump_info(skb, ct) < 0)
599 goto nla_put_failure;
600 if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
601 goto nla_put_failure;
602
603 nlmsg_end(skb, nlh);
604 return skb->len;
605
606 nlmsg_failure:
607 nla_put_failure:
608 nlmsg_cancel(skb, nlh);
609 return -1;
610 }
611
612 static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
613 [CTA_IP_V4_SRC] = { .type = NLA_U32 },
614 [CTA_IP_V4_DST] = { .type = NLA_U32 },
615 [CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 },
616 [CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 },
617 };
618
619 #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
621 {
622 const struct nf_conntrack_l4proto *l4proto;
623 size_t len, len4 = 0;
624
625 len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
626 len *= 3u; /* ORIG, REPLY, MASTER */
627
628 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
629 len += l4proto->nlattr_size;
630 if (l4proto->nlattr_tuple_size) {
631 len4 = l4proto->nlattr_tuple_size();
632 len4 *= 3u; /* ORIG, REPLY, MASTER */
633 }
634
635 return len + len4;
636 }
637 #endif
638
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
640 {
641 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
642 return 0;
643 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
644 + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
645 + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
646 ;
647 }
648
static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
650 {
651 #ifdef CONFIG_NF_CONNTRACK_SECMARK
652 int len, ret;
653
654 ret = security_secid_to_secctx(ct->secmark, NULL, &len);
655 if (ret)
656 return 0;
657
658 return nla_total_size(0) /* CTA_SECCTX */
659 + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
660 #else
661 return 0;
662 #endif
663 }
664
static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
666 {
667 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
668 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
669 return 0;
670 return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
671 #else
672 return 0;
673 #endif
674 }
675
676 #ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
678 {
679 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
680 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
681 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
682 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
683 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
684 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
685 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
686 + ctnetlink_acct_size(ct)
687 + ctnetlink_timestamp_size(ct)
688 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
689 + nla_total_size(0) /* CTA_PROTOINFO */
690 + nla_total_size(0) /* CTA_HELP */
691 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
692 + ctnetlink_secctx_size(ct)
693 #if IS_ENABLED(CONFIG_NF_NAT)
694 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
695 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
696 #endif
697 #ifdef CONFIG_NF_CONNTRACK_MARK
698 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
699 #endif
700 #ifdef CONFIG_NF_CONNTRACK_ZONES
701 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
702 #endif
703 + ctnetlink_proto_size(ct)
704 + ctnetlink_label_size(ct)
705 ;
706 }
707
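/* Translate conntrack events into a netlink notification: DESTROY events go
 * to the DESTROY group as DELETE messages, NEW/RELATED to the NEW group and
 * everything else to the UPDATE group. Returns -ENOBUFS on delivery failure
 * so callers can retry delivery.
 */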
708 static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
710 {
711 const struct nf_conntrack_zone *zone;
712 struct net *net;
713 struct nlmsghdr *nlh;
714 struct nfgenmsg *nfmsg;
715 struct nlattr *nest_parms;
716 struct nf_conn *ct = item->ct;
717 struct sk_buff *skb;
718 unsigned int type;
719 unsigned int flags = 0, group;
720 int err;
721
722 if (events & (1 << IPCT_DESTROY)) {
723 type = IPCTNL_MSG_CT_DELETE;
724 group = NFNLGRP_CONNTRACK_DESTROY;
725 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
726 type = IPCTNL_MSG_CT_NEW;
727 flags = NLM_F_CREATE|NLM_F_EXCL;
728 group = NFNLGRP_CONNTRACK_NEW;
729 } else if (events) {
730 type = IPCTNL_MSG_CT_NEW;
731 group = NFNLGRP_CONNTRACK_UPDATE;
732 } else
733 return 0;
734
735 net = nf_ct_net(ct);
736 if (!item->report && !nfnetlink_has_listeners(net, group))
737 return 0;
738
739 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
740 if (skb == NULL)
741 goto errout;
742
743 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
744 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
745 if (nlh == NULL)
746 goto nlmsg_failure;
747
748 nfmsg = nlmsg_data(nlh);
749 nfmsg->nfgen_family = nf_ct_l3num(ct);
750 nfmsg->version = NFNETLINK_V0;
751 nfmsg->res_id = 0;
752
753 zone = nf_ct_zone(ct);
754
755 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
756 if (!nest_parms)
757 goto nla_put_failure;
758 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
759 goto nla_put_failure;
760 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
761 NF_CT_ZONE_DIR_ORIG) < 0)
762 goto nla_put_failure;
763 nla_nest_end(skb, nest_parms);
764
765 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
766 if (!nest_parms)
767 goto nla_put_failure;
768 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
769 goto nla_put_failure;
770 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
771 NF_CT_ZONE_DIR_REPL) < 0)
772 goto nla_put_failure;
773 nla_nest_end(skb, nest_parms);
774
775 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
776 NF_CT_DEFAULT_ZONE_DIR) < 0)
777 goto nla_put_failure;
778
779 if (ctnetlink_dump_id(skb, ct) < 0)
780 goto nla_put_failure;
781
782 if (ctnetlink_dump_status(skb, ct) < 0)
783 goto nla_put_failure;
784
785 if (events & (1 << IPCT_DESTROY)) {
786 if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
787 ctnetlink_dump_timestamp(skb, ct) < 0)
788 goto nla_put_failure;
789 } else {
790 if (ctnetlink_dump_timeout(skb, ct) < 0)
791 goto nla_put_failure;
792
793 if (events & (1 << IPCT_PROTOINFO)
794 && ctnetlink_dump_protoinfo(skb, ct) < 0)
795 goto nla_put_failure;
796
797 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
798 && ctnetlink_dump_helpinfo(skb, ct) < 0)
799 goto nla_put_failure;
800
801 #ifdef CONFIG_NF_CONNTRACK_SECMARK
802 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
803 && ctnetlink_dump_secctx(skb, ct) < 0)
804 goto nla_put_failure;
805 #endif
806 if (events & (1 << IPCT_LABEL) &&
807 ctnetlink_dump_labels(skb, ct) < 0)
808 goto nla_put_failure;
809
810 if (events & (1 << IPCT_RELATED) &&
811 ctnetlink_dump_master(skb, ct) < 0)
812 goto nla_put_failure;
813
814 if (events & (1 << IPCT_SEQADJ) &&
815 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
816 goto nla_put_failure;
817
818 if (events & (1 << IPCT_SYNPROXY) &&
819 ctnetlink_dump_ct_synproxy(skb, ct) < 0)
820 goto nla_put_failure;
821 }
822
823 #ifdef CONFIG_NF_CONNTRACK_MARK
824 if ((events & (1 << IPCT_MARK) || ct->mark)
825 && ctnetlink_dump_mark(skb, ct) < 0)
826 goto nla_put_failure;
827 #endif
828 nlmsg_end(skb, nlh);
829 err = nfnetlink_send(skb, net, item->portid, group, item->report,
830 GFP_ATOMIC);
831 if (err == -ENOBUFS || err == -EAGAIN)
832 return -ENOBUFS;
833
834 return 0;
835
836 nla_put_failure:
837 nlmsg_cancel(skb, nlh);
838 nlmsg_failure:
839 kfree_skb(skb);
840 errout:
841 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
842 return -ENOBUFS;
843
844 return 0;
845 }
846 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
847
static int ctnetlink_done(struct netlink_callback *cb)
849 {
850 if (cb->args[1])
851 nf_ct_put((struct nf_conn *)cb->args[1]);
852 kfree(cb->data);
853 return 0;
854 }
855
856 struct ctnetlink_filter {
857 u8 family;
858
859 u_int32_t orig_flags;
860 u_int32_t reply_flags;
861
862 struct nf_conntrack_tuple orig;
863 struct nf_conntrack_tuple reply;
864 struct nf_conntrack_zone zone;
865
866 struct {
867 u_int32_t val;
868 u_int32_t mask;
869 } mark;
870 };
871
872 static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
873 [CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 },
874 [CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 },
875 };
876
static int ctnetlink_parse_filter(const struct nlattr *attr,
878 struct ctnetlink_filter *filter)
879 {
880 struct nlattr *tb[CTA_FILTER_MAX + 1];
881 int ret = 0;
882
883 ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
884 NULL);
885 if (ret)
886 return ret;
887
888 if (tb[CTA_FILTER_ORIG_FLAGS]) {
889 filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
890 if (filter->orig_flags & ~CTA_FILTER_F_ALL)
891 return -EOPNOTSUPP;
892 }
893
894 if (tb[CTA_FILTER_REPLY_FLAGS]) {
895 filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
896 if (filter->reply_flags & ~CTA_FILTER_F_ALL)
897 return -EOPNOTSUPP;
898 }
899
900 return 0;
901 }
902
903 static int ctnetlink_parse_zone(const struct nlattr *attr,
904 struct nf_conntrack_zone *zone);
905 static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
906 struct nf_conntrack_tuple *tuple,
907 u32 type, u_int8_t l3num,
908 struct nf_conntrack_zone *zone,
909 u_int32_t flags);
910
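/* Build a kernel-side filter for dump/flush requests from CTA_MARK/
 * CTA_MARK_MASK, CTA_ZONE, CTA_FILTER and the orig/reply tuple attributes.
 */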
911 static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
913 {
914 struct ctnetlink_filter *filter;
915 int err;
916
917 #ifndef CONFIG_NF_CONNTRACK_MARK
918 if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
919 return ERR_PTR(-EOPNOTSUPP);
920 #endif
921
922 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
923 if (filter == NULL)
924 return ERR_PTR(-ENOMEM);
925
926 filter->family = family;
927
928 #ifdef CONFIG_NF_CONNTRACK_MARK
929 if (cda[CTA_MARK]) {
930 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
931 if (cda[CTA_MARK_MASK])
932 filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
933 else
934 filter->mark.mask = 0xffffffff;
935 } else if (cda[CTA_MARK_MASK]) {
936 err = -EINVAL;
937 goto err_filter;
938 }
939 #endif
940 if (!cda[CTA_FILTER])
941 return filter;
942
943 err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
944 if (err < 0)
945 goto err_filter;
946
947 err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
948 if (err < 0)
949 goto err_filter;
950
951 if (filter->orig_flags) {
952 if (!cda[CTA_TUPLE_ORIG]) {
953 err = -EINVAL;
954 goto err_filter;
955 }
956
957 err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
958 CTA_TUPLE_ORIG,
959 filter->family,
960 &filter->zone,
961 filter->orig_flags);
962 if (err < 0)
963 goto err_filter;
964 }
965
966 if (filter->reply_flags) {
967 if (!cda[CTA_TUPLE_REPLY]) {
968 err = -EINVAL;
969 goto err_filter;
970 }
971
972 err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
973 CTA_TUPLE_REPLY,
974 filter->family,
975 &filter->zone,
976 filter->reply_flags);
977 if (err < 0)
978 goto err_filter;
979 }
980
981 return filter;
982
983 err_filter:
984 kfree(filter);
985
986 return ERR_PTR(err);
987 }
988
static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
990 {
991 return family || cda[CTA_MARK] || cda[CTA_FILTER];
992 }
993
static int ctnetlink_start(struct netlink_callback *cb)
995 {
996 const struct nlattr * const *cda = cb->data;
997 struct ctnetlink_filter *filter = NULL;
998 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
999 u8 family = nfmsg->nfgen_family;
1000
1001 if (ctnetlink_needs_filter(family, cda)) {
1002 filter = ctnetlink_alloc_filter(cda, family);
1003 if (IS_ERR(filter))
1004 return PTR_ERR(filter);
1005 }
1006
1007 cb->data = filter;
1008 return 0;
1009 }
1010
static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
1012 struct nf_conntrack_tuple *ct_tuple,
1013 u_int32_t flags, int family)
1014 {
1015 switch (family) {
1016 case NFPROTO_IPV4:
1017 if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
1018 filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
1019 return 0;
1020
1021 if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
1022 filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
1023 return 0;
1024 break;
1025 case NFPROTO_IPV6:
1026 if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
1027 !ipv6_addr_cmp(&filter_tuple->src.u3.in6,
1028 &ct_tuple->src.u3.in6))
1029 return 0;
1030
1031 if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
1032 !ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
1033 &ct_tuple->dst.u3.in6))
1034 return 0;
1035 break;
1036 }
1037
1038 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
1039 filter_tuple->dst.protonum != ct_tuple->dst.protonum)
1040 return 0;
1041
1042 switch (ct_tuple->dst.protonum) {
1043 case IPPROTO_TCP:
1044 case IPPROTO_UDP:
1045 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
1046 filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
1047 return 0;
1048
1049 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
1050 filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
1051 return 0;
1052 break;
1053 case IPPROTO_ICMP:
1054 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
1055 filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
1056 return 0;
1057 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
1058 filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
1059 return 0;
1060 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
1061 filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
1062 return 0;
1063 break;
1064 case IPPROTO_ICMPV6:
1065 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
1066 filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
1067 return 0;
1068 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
1069 filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
1070 return 0;
1071 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
1072 filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
1073 return 0;
1074 break;
1075 }
1076
1077 return 1;
1078 }
1079
static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
1081 {
1082 struct ctnetlink_filter *filter = data;
1083 struct nf_conntrack_tuple *tuple;
1084
1085 if (filter == NULL)
1086 goto out;
1087
1088 /* Match entries of a given L3 protocol number.
1089 * If it is not specified, ie. l3proto == 0,
1090 * then match everything.
1091 */
1092 if (filter->family && nf_ct_l3num(ct) != filter->family)
1093 goto ignore_entry;
1094
1095 if (filter->orig_flags) {
1096 tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
1097 if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
1098 filter->orig_flags,
1099 filter->family))
1100 goto ignore_entry;
1101 }
1102
1103 if (filter->reply_flags) {
1104 tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
1105 if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
1106 filter->reply_flags,
1107 filter->family))
1108 goto ignore_entry;
1109 }
1110
1111 #ifdef CONFIG_NF_CONNTRACK_MARK
1112 if ((ct->mark & filter->mark.mask) != filter->mark.val)
1113 goto ignore_entry;
1114 #endif
1115
1116 out:
1117 return 1;
1118
1119 ignore_entry:
1120 return 0;
1121 }
1122
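/* Walk the conntrack hash table bucket by bucket under the per-bucket lock.
 * cb->args[0] holds the next bucket and cb->args[1] a reference to the last
 * dumped entry so the dump can resume; expired entries found on the way are
 * collected in nf_ct_evict[] and killed outside the bucket lock.
 */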
1123 static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1125 {
1126 unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
1127 struct net *net = sock_net(skb->sk);
1128 struct nf_conn *ct, *last;
1129 struct nf_conntrack_tuple_hash *h;
1130 struct hlist_nulls_node *n;
1131 struct nf_conn *nf_ct_evict[8];
1132 int res, i;
1133 spinlock_t *lockp;
1134
1135 last = (struct nf_conn *)cb->args[1];
1136 i = 0;
1137
1138 local_bh_disable();
1139 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
1140 restart:
1141 while (i) {
1142 i--;
1143 if (nf_ct_should_gc(nf_ct_evict[i]))
1144 nf_ct_kill(nf_ct_evict[i]);
1145 nf_ct_put(nf_ct_evict[i]);
1146 }
1147
1148 lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
1149 nf_conntrack_lock(lockp);
1150 if (cb->args[0] >= nf_conntrack_htable_size) {
1151 spin_unlock(lockp);
1152 goto out;
1153 }
1154 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
1155 hnnode) {
1156 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1157 continue;
1158 ct = nf_ct_tuplehash_to_ctrack(h);
1159 if (nf_ct_is_expired(ct)) {
1160 if (i < ARRAY_SIZE(nf_ct_evict) &&
1161 atomic_inc_not_zero(&ct->ct_general.use))
1162 nf_ct_evict[i++] = ct;
1163 continue;
1164 }
1165
1166 if (!net_eq(net, nf_ct_net(ct)))
1167 continue;
1168
1169 if (cb->args[1]) {
1170 if (ct != last)
1171 continue;
1172 cb->args[1] = 0;
1173 }
1174 if (!ctnetlink_filter_match(ct, cb->data))
1175 continue;
1176
1177 res =
1178 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1179 cb->nlh->nlmsg_seq,
1180 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1181 ct, true, flags);
1182 if (res < 0) {
1183 nf_conntrack_get(&ct->ct_general);
1184 cb->args[1] = (unsigned long)ct;
1185 spin_unlock(lockp);
1186 goto out;
1187 }
1188 }
1189 spin_unlock(lockp);
1190 if (cb->args[1]) {
1191 cb->args[1] = 0;
1192 goto restart;
1193 }
1194 }
1195 out:
1196 local_bh_enable();
1197 if (last) {
1198 /* nf ct hash resize happened, now clear the leftover. */
1199 if ((struct nf_conn *)cb->args[1] == last)
1200 cb->args[1] = 0;
1201
1202 nf_ct_put(last);
1203 }
1204
1205 while (i) {
1206 i--;
1207 if (nf_ct_should_gc(nf_ct_evict[i]))
1208 nf_ct_kill(nf_ct_evict[i]);
1209 nf_ct_put(nf_ct_evict[i]);
1210 }
1211
1212 return skb->len;
1213 }
1214
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
1216 struct nf_conntrack_tuple *t,
1217 u_int32_t flags)
1218 {
1219 if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1220 if (!tb[CTA_IP_V4_SRC])
1221 return -EINVAL;
1222
1223 t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
1224 }
1225
1226 if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
1227 if (!tb[CTA_IP_V4_DST])
1228 return -EINVAL;
1229
1230 t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
1231 }
1232
1233 return 0;
1234 }
1235
static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
1237 struct nf_conntrack_tuple *t,
1238 u_int32_t flags)
1239 {
1240 if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1241 if (!tb[CTA_IP_V6_SRC])
1242 return -EINVAL;
1243
1244 t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
1245 }
1246
1247 if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
1248 if (!tb[CTA_IP_V6_DST])
1249 return -EINVAL;
1250
1251 t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
1252 }
1253
1254 return 0;
1255 }
1256
static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
1258 struct nf_conntrack_tuple *tuple,
1259 u_int32_t flags)
1260 {
1261 struct nlattr *tb[CTA_IP_MAX+1];
1262 int ret = 0;
1263
1264 ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL);
1265 if (ret < 0)
1266 return ret;
1267
1268 ret = nla_validate_nested_deprecated(attr, CTA_IP_MAX,
1269 cta_ip_nla_policy, NULL);
1270 if (ret)
1271 return ret;
1272
1273 switch (tuple->src.l3num) {
1274 case NFPROTO_IPV4:
1275 ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
1276 break;
1277 case NFPROTO_IPV6:
1278 ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
1279 break;
1280 }
1281
1282 return ret;
1283 }
1284
1285 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
1286 [CTA_PROTO_NUM] = { .type = NLA_U8 },
1287 };
1288
static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
1290 struct nf_conntrack_tuple *tuple,
1291 u_int32_t flags)
1292 {
1293 const struct nf_conntrack_l4proto *l4proto;
1294 struct nlattr *tb[CTA_PROTO_MAX+1];
1295 int ret = 0;
1296
1297 ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
1298 proto_nla_policy, NULL);
1299 if (ret < 0)
1300 return ret;
1301
1302 if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
1303 return 0;
1304
1305 if (!tb[CTA_PROTO_NUM])
1306 return -EINVAL;
1307
1308 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
1309
1310 rcu_read_lock();
1311 l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
1312
1313 if (likely(l4proto->nlattr_to_tuple)) {
1314 ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
1315 l4proto->nla_policy,
1316 NULL);
1317 if (ret == 0)
1318 ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
1319 }
1320
1321 rcu_read_unlock();
1322
1323 return ret;
1324 }
1325
1326 static int
ctnetlink_parse_zone(const struct nlattr *attr,
1328 struct nf_conntrack_zone *zone)
1329 {
1330 nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
1331 NF_CT_DEFAULT_ZONE_DIR, 0);
1332 #ifdef CONFIG_NF_CONNTRACK_ZONES
1333 if (attr)
1334 zone->id = ntohs(nla_get_be16(attr));
1335 #else
1336 if (attr)
1337 return -EOPNOTSUPP;
1338 #endif
1339 return 0;
1340 }
1341
1342 static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
1344 struct nf_conntrack_zone *zone)
1345 {
1346 int ret;
1347
1348 if (zone->id != NF_CT_DEFAULT_ZONE_ID)
1349 return -EINVAL;
1350
1351 ret = ctnetlink_parse_zone(attr, zone);
1352 if (ret < 0)
1353 return ret;
1354
1355 if (type == CTA_TUPLE_REPLY)
1356 zone->dir = NF_CT_ZONE_DIR_REPL;
1357 else
1358 zone->dir = NF_CT_ZONE_DIR_ORIG;
1359
1360 return 0;
1361 }
1362
1363 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1364 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
1365 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
1366 [CTA_TUPLE_ZONE] = { .type = NLA_U16 },
1367 };
1368
1369 #define CTA_FILTER_F_ALL_CTA_PROTO \
1370 (CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
1371 CTA_FILTER_F_CTA_PROTO_DST_PORT | \
1372 CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
1373 CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
1374 CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
1375 CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
1376 CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
1377 CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)
1378
1379 static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
1381 struct nf_conntrack_tuple *tuple, u32 type,
1382 u_int8_t l3num, struct nf_conntrack_zone *zone,
1383 u_int32_t flags)
1384 {
1385 struct nlattr *tb[CTA_TUPLE_MAX+1];
1386 int err;
1387
1388 memset(tuple, 0, sizeof(*tuple));
1389
1390 err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
1391 tuple_nla_policy, NULL);
1392 if (err < 0)
1393 return err;
1394
1395 if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
1396 return -EOPNOTSUPP;
1397 tuple->src.l3num = l3num;
1398
1399 if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
1400 flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1401 if (!tb[CTA_TUPLE_IP])
1402 return -EINVAL;
1403
1404 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
1405 if (err < 0)
1406 return err;
1407 }
1408
1409 if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
1410 if (!tb[CTA_TUPLE_PROTO])
1411 return -EINVAL;
1412
1413 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
1414 if (err < 0)
1415 return err;
1416 } else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
1417 /* Can't manage proto flags without a protonum */
1418 return -EINVAL;
1419 }
1420
1421 if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
1422 if (!zone)
1423 return -EINVAL;
1424
1425 err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
1426 type, zone);
1427 if (err < 0)
1428 return err;
1429 }
1430
1431 /* orig and expect tuples get DIR_ORIGINAL */
1432 if (type == CTA_TUPLE_REPLY)
1433 tuple->dst.dir = IP_CT_DIR_REPLY;
1434 else
1435 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
1436
1437 return 0;
1438 }
1439
1440 static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
1442 struct nf_conntrack_tuple *tuple, u32 type,
1443 u_int8_t l3num, struct nf_conntrack_zone *zone)
1444 {
1445 return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
1446 CTA_FILTER_FLAG(ALL));
1447 }
1448
1449 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
1450 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
1451 .len = NF_CT_HELPER_NAME_LEN - 1 },
1452 };
1453
static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
1455 struct nlattr **helpinfo)
1456 {
1457 int err;
1458 struct nlattr *tb[CTA_HELP_MAX+1];
1459
1460 err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
1461 help_nla_policy, NULL);
1462 if (err < 0)
1463 return err;
1464
1465 if (!tb[CTA_HELP_NAME])
1466 return -EINVAL;
1467
1468 *helper_name = nla_data(tb[CTA_HELP_NAME]);
1469
1470 if (tb[CTA_HELP_INFO])
1471 *helpinfo = tb[CTA_HELP_INFO];
1472
1473 return 0;
1474 }
1475
1476 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
1477 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
1478 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
1479 [CTA_STATUS] = { .type = NLA_U32 },
1480 [CTA_PROTOINFO] = { .type = NLA_NESTED },
1481 [CTA_HELP] = { .type = NLA_NESTED },
1482 [CTA_NAT_SRC] = { .type = NLA_NESTED },
1483 [CTA_TIMEOUT] = { .type = NLA_U32 },
1484 [CTA_MARK] = { .type = NLA_U32 },
1485 [CTA_ID] = { .type = NLA_U32 },
1486 [CTA_NAT_DST] = { .type = NLA_NESTED },
1487 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
1488 [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
1489 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
1490 [CTA_ZONE] = { .type = NLA_U16 },
1491 [CTA_MARK_MASK] = { .type = NLA_U32 },
1492 [CTA_LABELS] = { .type = NLA_BINARY,
1493 .len = NF_CT_LABELS_MAX_SIZE },
1494 [CTA_LABELS_MASK] = { .type = NLA_BINARY,
1495 .len = NF_CT_LABELS_MAX_SIZE },
1496 [CTA_FILTER] = { .type = NLA_NESTED },
1497 };
1498
static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
1500 {
1501 if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
1502 return 0;
1503
1504 return ctnetlink_filter_match(ct, data);
1505 }
1506
static int ctnetlink_flush_conntrack(struct net *net,
1508 const struct nlattr * const cda[],
1509 u32 portid, int report, u8 family)
1510 {
1511 struct ctnetlink_filter *filter = NULL;
1512
1513 if (ctnetlink_needs_filter(family, cda)) {
1514 if (cda[CTA_FILTER])
1515 return -EOPNOTSUPP;
1516
1517 filter = ctnetlink_alloc_filter(cda, family);
1518 if (IS_ERR(filter))
1519 return PTR_ERR(filter);
1520 }
1521
1522 nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
1523 portid, report);
1524 kfree(filter);
1525
1526 return 0;
1527 }
1528
static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
1530 struct sk_buff *skb,
1531 const struct nlmsghdr *nlh,
1532 const struct nlattr * const cda[],
1533 struct netlink_ext_ack *extack)
1534 {
1535 struct nf_conntrack_tuple_hash *h;
1536 struct nf_conntrack_tuple tuple;
1537 struct nf_conn *ct;
1538 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1539 struct nf_conntrack_zone zone;
1540 int err;
1541
1542 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1543 if (err < 0)
1544 return err;
1545
1546 if (cda[CTA_TUPLE_ORIG])
1547 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1548 nfmsg->nfgen_family, &zone);
1549 else if (cda[CTA_TUPLE_REPLY])
1550 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1551 nfmsg->nfgen_family, &zone);
1552 else {
1553 u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
1554
1555 return ctnetlink_flush_conntrack(net, cda,
1556 NETLINK_CB(skb).portid,
1557 nlmsg_report(nlh), u3);
1558 }
1559
1560 if (err < 0)
1561 return err;
1562
1563 h = nf_conntrack_find_get(net, &zone, &tuple);
1564 if (!h)
1565 return -ENOENT;
1566
1567 ct = nf_ct_tuplehash_to_ctrack(h);
1568
1569 if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
1570 nf_ct_put(ct);
1571 return -EBUSY;
1572 }
1573
1574 if (cda[CTA_ID]) {
1575 __be32 id = nla_get_be32(cda[CTA_ID]);
1576
1577 if (id != (__force __be32)nf_ct_get_id(ct)) {
1578 nf_ct_put(ct);
1579 return -ENOENT;
1580 }
1581 }
1582
1583 nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1584 nf_ct_put(ct);
1585
1586 return 0;
1587 }
1588
static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
1590 struct sk_buff *skb,
1591 const struct nlmsghdr *nlh,
1592 const struct nlattr * const cda[],
1593 struct netlink_ext_ack *extack)
1594 {
1595 struct nf_conntrack_tuple_hash *h;
1596 struct nf_conntrack_tuple tuple;
1597 struct nf_conn *ct;
1598 struct sk_buff *skb2 = NULL;
1599 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1600 u_int8_t u3 = nfmsg->nfgen_family;
1601 struct nf_conntrack_zone zone;
1602 int err;
1603
1604 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1605 struct netlink_dump_control c = {
1606 .start = ctnetlink_start,
1607 .dump = ctnetlink_dump_table,
1608 .done = ctnetlink_done,
1609 .data = (void *)cda,
1610 };
1611
1612 return netlink_dump_start(ctnl, skb, nlh, &c);
1613 }
1614
1615 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1616 if (err < 0)
1617 return err;
1618
1619 if (cda[CTA_TUPLE_ORIG])
1620 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1621 u3, &zone);
1622 else if (cda[CTA_TUPLE_REPLY])
1623 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1624 u3, &zone);
1625 else
1626 return -EINVAL;
1627
1628 if (err < 0)
1629 return err;
1630
1631 h = nf_conntrack_find_get(net, &zone, &tuple);
1632 if (!h)
1633 return -ENOENT;
1634
1635 ct = nf_ct_tuplehash_to_ctrack(h);
1636
1637 err = -ENOMEM;
1638 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1639 if (skb2 == NULL) {
1640 nf_ct_put(ct);
1641 return -ENOMEM;
1642 }
1643
1644 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1645 NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true, 0);
1646 nf_ct_put(ct);
1647 if (err <= 0)
1648 goto free;
1649
1650 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1651 if (err < 0)
1652 goto out;
1653
1654 return 0;
1655
1656 free:
1657 kfree_skb(skb2);
1658 out:
1659 /* this avoids a loop in nfnetlink. */
1660 return err == -EAGAIN ? -ENOBUFS : err;
1661 }
1662
static int ctnetlink_done_list(struct netlink_callback *cb)
1664 {
1665 if (cb->args[1])
1666 nf_ct_put((struct nf_conn *)cb->args[1]);
1667 return 0;
1668 }
1669
1670 static int
ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1672 {
1673 struct nf_conn *ct, *last;
1674 struct nf_conntrack_tuple_hash *h;
1675 struct hlist_nulls_node *n;
1676 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1677 u_int8_t l3proto = nfmsg->nfgen_family;
1678 int res;
1679 int cpu;
1680 struct hlist_nulls_head *list;
1681 struct net *net = sock_net(skb->sk);
1682
1683 if (cb->args[2])
1684 return 0;
1685
1686 last = (struct nf_conn *)cb->args[1];
1687
1688 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1689 struct ct_pcpu *pcpu;
1690
1691 if (!cpu_possible(cpu))
1692 continue;
1693
1694 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1695 spin_lock_bh(&pcpu->lock);
1696 list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1697 restart:
1698 hlist_nulls_for_each_entry(h, n, list, hnnode) {
1699 ct = nf_ct_tuplehash_to_ctrack(h);
1700 if (l3proto && nf_ct_l3num(ct) != l3proto)
1701 continue;
1702 if (cb->args[1]) {
1703 if (ct != last)
1704 continue;
1705 cb->args[1] = 0;
1706 }
1707
1708 /* We can't dump extension info for the unconfirmed
1709 * list because unconfirmed conntracks can have
1710 * ct->ext reallocated (and thus freed).
1711 *
1712 * In the dying list case ct->ext can't be free'd
1713 * until after we drop pcpu->lock.
1714 */
1715 res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1716 cb->nlh->nlmsg_seq,
1717 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1718 ct, dying ? true : false, 0);
1719 if (res < 0) {
1720 if (!atomic_inc_not_zero(&ct->ct_general.use))
1721 continue;
1722 cb->args[0] = cpu;
1723 cb->args[1] = (unsigned long)ct;
1724 spin_unlock_bh(&pcpu->lock);
1725 goto out;
1726 }
1727 }
1728 if (cb->args[1]) {
1729 cb->args[1] = 0;
1730 goto restart;
1731 }
1732 spin_unlock_bh(&pcpu->lock);
1733 }
1734 cb->args[2] = 1;
1735 out:
1736 if (last)
1737 nf_ct_put(last);
1738
1739 return skb->len;
1740 }
1741
1742 static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1744 {
1745 return ctnetlink_dump_list(skb, cb, true);
1746 }
1747
static int ctnetlink_get_ct_dying(struct net *net, struct sock *ctnl,
1749 struct sk_buff *skb,
1750 const struct nlmsghdr *nlh,
1751 const struct nlattr * const cda[],
1752 struct netlink_ext_ack *extack)
1753 {
1754 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1755 struct netlink_dump_control c = {
1756 .dump = ctnetlink_dump_dying,
1757 .done = ctnetlink_done_list,
1758 };
1759 return netlink_dump_start(ctnl, skb, nlh, &c);
1760 }
1761
1762 return -EOPNOTSUPP;
1763 }
1764
1765 static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1767 {
1768 return ctnetlink_dump_list(skb, cb, false);
1769 }
1770
static int ctnetlink_get_ct_unconfirmed(struct net *net, struct sock *ctnl,
1772 struct sk_buff *skb,
1773 const struct nlmsghdr *nlh,
1774 const struct nlattr * const cda[],
1775 struct netlink_ext_ack *extack)
1776 {
1777 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1778 struct netlink_dump_control c = {
1779 .dump = ctnetlink_dump_unconfirmed,
1780 .done = ctnetlink_done_list,
1781 };
1782 return netlink_dump_start(ctnl, skb, nlh, &c);
1783 }
1784
1785 return -EOPNOTSUPP;
1786 }
1787
1788 #if IS_ENABLED(CONFIG_NF_NAT)
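/* If no NAT hook (or l3-specific NAT module) is registered yet, temporarily
 * drop the RCU read lock and the ctnetlink mutex to load the module via
 * request_module(), then return -EAGAIN so the caller retries the operation.
 */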
1789 static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
1791 enum nf_nat_manip_type manip,
1792 const struct nlattr *attr)
1793 __must_hold(RCU)
1794 {
1795 struct nf_nat_hook *nat_hook;
1796 int err;
1797
1798 nat_hook = rcu_dereference(nf_nat_hook);
1799 if (!nat_hook) {
1800 #ifdef CONFIG_MODULES
1801 rcu_read_unlock();
1802 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1803 if (request_module("nf-nat") < 0) {
1804 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1805 rcu_read_lock();
1806 return -EOPNOTSUPP;
1807 }
1808 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1809 rcu_read_lock();
1810 nat_hook = rcu_dereference(nf_nat_hook);
1811 if (nat_hook)
1812 return -EAGAIN;
1813 #endif
1814 return -EOPNOTSUPP;
1815 }
1816
1817 err = nat_hook->parse_nat_setup(ct, manip, attr);
1818 if (err == -EAGAIN) {
1819 #ifdef CONFIG_MODULES
1820 rcu_read_unlock();
1821 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1822 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1823 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1824 rcu_read_lock();
1825 return -EOPNOTSUPP;
1826 }
1827 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1828 rcu_read_lock();
1829 #else
1830 err = -EOPNOTSUPP;
1831 #endif
1832 }
1833 return err;
1834 }
1835 #endif
1836
1837 static void
__ctnetlink_change_status(struct nf_conn *ct, unsigned long on,
1839 unsigned long off)
1840 {
1841 unsigned int bit;
1842
/* Ignore these unchangeable bits */
1844 on &= ~IPS_UNCHANGEABLE_MASK;
1845 off &= ~IPS_UNCHANGEABLE_MASK;
1846
1847 for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
1848 if (on & (1 << bit))
1849 set_bit(bit, &ct->status);
1850 else if (off & (1 << bit))
1851 clear_bit(bit, &ct->status);
1852 }
1853 }
1854
1855 static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1857 {
1858 unsigned long d;
1859 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1860 d = ct->status ^ status;
1861
1862 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1863 /* unchangeable */
1864 return -EBUSY;
1865
1866 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1867 /* SEEN_REPLY bit can only be set */
1868 return -EBUSY;
1869
1870 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1871 /* ASSURED bit can only be set */
1872 return -EBUSY;
1873
1874 __ctnetlink_change_status(ct, status, 0);
1875 return 0;
1876 }
1877
1878 static int
ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1880 {
1881 #if IS_ENABLED(CONFIG_NF_NAT)
1882 int ret;
1883
1884 if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1885 return 0;
1886
1887 ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1888 cda[CTA_NAT_DST]);
1889 if (ret < 0)
1890 return ret;
1891
1892 return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
1893 cda[CTA_NAT_SRC]);
1894 #else
1895 if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1896 return 0;
1897 return -EOPNOTSUPP;
1898 #endif
1899 }
1900
static int ctnetlink_change_helper(struct nf_conn *ct,
1902 const struct nlattr * const cda[])
1903 {
1904 struct nf_conntrack_helper *helper;
1905 struct nf_conn_help *help = nfct_help(ct);
1906 char *helpname = NULL;
1907 struct nlattr *helpinfo = NULL;
1908 int err;
1909
1910 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1911 if (err < 0)
1912 return err;
1913
1914 /* don't change helper of sibling connections */
1915 if (ct->master) {
1916 /* If we try to change the helper to the same thing twice,
1917 * treat the second attempt as a no-op instead of returning
1918 * an error.
1919 */
1920 err = -EBUSY;
1921 if (help) {
1922 rcu_read_lock();
1923 helper = rcu_dereference(help->helper);
1924 if (helper && !strcmp(helper->name, helpname))
1925 err = 0;
1926 rcu_read_unlock();
1927 }
1928
1929 return err;
1930 }
1931
1932 if (!strcmp(helpname, "")) {
1933 if (help && help->helper) {
1934 /* we had a helper before ... */
1935 nf_ct_remove_expectations(ct);
1936 RCU_INIT_POINTER(help->helper, NULL);
1937 }
1938
1939 return 0;
1940 }
1941
1942 rcu_read_lock();
1943 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1944 nf_ct_protonum(ct));
1945 if (helper == NULL) {
1946 rcu_read_unlock();
1947 return -EOPNOTSUPP;
1948 }
1949
1950 if (help) {
1951 if (help->helper == helper) {
1952 /* update private helper data if allowed. */
1953 if (helper->from_nlattr)
1954 helper->from_nlattr(helpinfo, ct);
1955 err = 0;
1956 } else
1957 err = -EBUSY;
1958 } else {
1959 /* we cannot set a helper for an existing conntrack */
1960 err = -EOPNOTSUPP;
1961 }
1962
1963 rcu_read_unlock();
1964 return err;
1965 }
1966
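/* Update the conntrack timeout from CTA_TIMEOUT (in seconds).  The value
 * is converted to jiffies, clamped to INT_MAX and written with
 * WRITE_ONCE(); -ETIME is returned if the entry is already dying.
 */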
1967 static int ctnetlink_change_timeout(struct nf_conn *ct,
1968 const struct nlattr * const cda[])
1969 {
1970 u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1971
1972 if (timeout > INT_MAX)
1973 timeout = INT_MAX;
1974 WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
1975
1976 if (test_bit(IPS_DYING_BIT, &ct->status))
1977 return -ETIME;
1978
1979 return 0;
1980 }
1981
1982 #if defined(CONFIG_NF_CONNTRACK_MARK)
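/* Update ct->mark from CTA_MARK/CTA_MARK_MASK.  The new value is computed
 * as (old_mark & ~CTA_MARK_MASK) ^ CTA_MARK, i.e. only the bits covered by
 * the mask are meant to be replaced; without a mask the whole mark is
 * replaced.  Illustrative example: old mark 0x12345678, CTA_MARK 0x000000ab,
 * CTA_MARK_MASK 0x000000ff yields 0x123456ab.
 */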
1983 static void ctnetlink_change_mark(struct nf_conn *ct,
1984 const struct nlattr * const cda[])
1985 {
1986 u32 mark, newmark, mask = 0;
1987
1988 if (cda[CTA_MARK_MASK])
1989 mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1990
1991 mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1992 newmark = (ct->mark & mask) ^ mark;
1993 if (newmark != ct->mark)
1994 ct->mark = newmark;
1995 }
1996 #endif
1997
1998 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1999 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
2000 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
2001 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
2002 };
2003
2004 static int ctnetlink_change_protoinfo(struct nf_conn *ct,
2005 const struct nlattr * const cda[])
2006 {
2007 const struct nlattr *attr = cda[CTA_PROTOINFO];
2008 const struct nf_conntrack_l4proto *l4proto;
2009 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
2010 int err = 0;
2011
2012 err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
2013 protoinfo_policy, NULL);
2014 if (err < 0)
2015 return err;
2016
2017 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
2018 if (l4proto->from_nlattr)
2019 err = l4proto->from_nlattr(tb, ct);
2020
2021 return err;
2022 }
2023
2024 static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
2025 [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
2026 [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
2027 [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
2028 };
2029
2030 static int change_seq_adj(struct nf_ct_seqadj *seq,
2031 const struct nlattr * const attr)
2032 {
2033 int err;
2034 struct nlattr *cda[CTA_SEQADJ_MAX+1];
2035
2036 err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
2037 seqadj_policy, NULL);
2038 if (err < 0)
2039 return err;
2040
2041 if (!cda[CTA_SEQADJ_CORRECTION_POS])
2042 return -EINVAL;
2043
2044 seq->correction_pos =
2045 ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
2046
2047 if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
2048 return -EINVAL;
2049
2050 seq->offset_before =
2051 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
2052
2053 if (!cda[CTA_SEQADJ_OFFSET_AFTER])
2054 return -EINVAL;
2055
2056 seq->offset_after =
2057 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
2058
2059 return 0;
2060 }
2061
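/* Install TCP sequence adjustment state from CTA_SEQ_ADJ_ORIG/_REPLY.
 * The supplied directions are updated under ct->lock and IPS_SEQ_ADJUST
 * is set so the seqadj code will apply the offsets.
 */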
2062 static int
2063 ctnetlink_change_seq_adj(struct nf_conn *ct,
2064 const struct nlattr * const cda[])
2065 {
2066 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
2067 int ret = 0;
2068
2069 if (!seqadj)
2070 return 0;
2071
2072 spin_lock_bh(&ct->lock);
2073 if (cda[CTA_SEQ_ADJ_ORIG]) {
2074 ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
2075 cda[CTA_SEQ_ADJ_ORIG]);
2076 if (ret < 0)
2077 goto err;
2078
2079 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
2080 }
2081
2082 if (cda[CTA_SEQ_ADJ_REPLY]) {
2083 ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
2084 cda[CTA_SEQ_ADJ_REPLY]);
2085 if (ret < 0)
2086 goto err;
2087
2088 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
2089 }
2090
2091 spin_unlock_bh(&ct->lock);
2092 return 0;
2093 err:
2094 spin_unlock_bh(&ct->lock);
2095 return ret;
2096 }
2097
2098 static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
2099 [CTA_SYNPROXY_ISN] = { .type = NLA_U32 },
2100 [CTA_SYNPROXY_ITS] = { .type = NLA_U32 },
2101 [CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 },
2102 };
2103
2104 static int ctnetlink_change_synproxy(struct nf_conn *ct,
2105 const struct nlattr * const cda[])
2106 {
2107 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
2108 struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
2109 int err;
2110
2111 if (!synproxy)
2112 return 0;
2113
2114 err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
2115 cda[CTA_SYNPROXY], synproxy_policy,
2116 NULL);
2117 if (err < 0)
2118 return err;
2119
2120 if (!tb[CTA_SYNPROXY_ISN] ||
2121 !tb[CTA_SYNPROXY_ITS] ||
2122 !tb[CTA_SYNPROXY_TSOFF])
2123 return -EINVAL;
2124
2125 synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
2126 synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
2127 synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
2128
2129 return 0;
2130 }
2131
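/* Copy connection labels from CTA_LABELS into the conntrack.  The label
 * blob must be a multiple of 32 bits, and CTA_LABELS_MASK, if present,
 * must have the same length; the actual bit update is done by
 * nf_connlabels_replace().
 */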
2132 static int
2133 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
2134 {
2135 #ifdef CONFIG_NF_CONNTRACK_LABELS
2136 size_t len = nla_len(cda[CTA_LABELS]);
2137 const void *mask = cda[CTA_LABELS_MASK];
2138
2139 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
2140 return -EINVAL;
2141
2142 if (mask) {
2143 if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
2144 nla_len(cda[CTA_LABELS_MASK]) != len)
2145 return -EINVAL;
2146 mask = nla_data(cda[CTA_LABELS_MASK]);
2147 }
2148
2149 len /= sizeof(u32);
2150
2151 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
2152 #else
2153 return -EOPNOTSUPP;
2154 #endif
2155 }
2156
2157 static int
2158 ctnetlink_change_conntrack(struct nf_conn *ct,
2159 const struct nlattr * const cda[])
2160 {
2161 int err;
2162
2163 /* only allow NAT changes and master assignment for new conntracks */
2164 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
2165 return -EOPNOTSUPP;
2166
2167 if (cda[CTA_HELP]) {
2168 err = ctnetlink_change_helper(ct, cda);
2169 if (err < 0)
2170 return err;
2171 }
2172
2173 if (cda[CTA_TIMEOUT]) {
2174 err = ctnetlink_change_timeout(ct, cda);
2175 if (err < 0)
2176 return err;
2177 }
2178
2179 if (cda[CTA_STATUS]) {
2180 err = ctnetlink_change_status(ct, cda);
2181 if (err < 0)
2182 return err;
2183 }
2184
2185 if (cda[CTA_PROTOINFO]) {
2186 err = ctnetlink_change_protoinfo(ct, cda);
2187 if (err < 0)
2188 return err;
2189 }
2190
2191 #if defined(CONFIG_NF_CONNTRACK_MARK)
2192 if (cda[CTA_MARK])
2193 ctnetlink_change_mark(ct, cda);
2194 #endif
2195
2196 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2197 err = ctnetlink_change_seq_adj(ct, cda);
2198 if (err < 0)
2199 return err;
2200 }
2201
2202 if (cda[CTA_SYNPROXY]) {
2203 err = ctnetlink_change_synproxy(ct, cda);
2204 if (err < 0)
2205 return err;
2206 }
2207
2208 if (cda[CTA_LABELS]) {
2209 err = ctnetlink_attach_labels(ct, cda);
2210 if (err < 0)
2211 return err;
2212 }
2213
2214 return 0;
2215 }
2216
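/* Build a new conntrack entry from netlink attributes.  CTA_TIMEOUT is
 * mandatory.  A helper named in CTA_HELP is looked up (with a module
 * autoload attempt that returns -EAGAIN so the request can be replayed),
 * NAT bindings and the usual extensions (acct, timestamp, ecache, labels,
 * seqadj, synproxy) are added, and only then is IPS_CONFIRMED set before
 * the entry is inserted into the hash table.  On failure the entry is
 * freed and an ERR_PTR is returned.
 */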
2217 static struct nf_conn *
2218 ctnetlink_create_conntrack(struct net *net,
2219 const struct nf_conntrack_zone *zone,
2220 const struct nlattr * const cda[],
2221 struct nf_conntrack_tuple *otuple,
2222 struct nf_conntrack_tuple *rtuple,
2223 u8 u3)
2224 {
2225 struct nf_conn *ct;
2226 int err = -EINVAL;
2227 struct nf_conntrack_helper *helper;
2228 struct nf_conn_tstamp *tstamp;
2229 u64 timeout;
2230
2231 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
2232 if (IS_ERR(ct))
2233 return ERR_PTR(-ENOMEM);
2234
2235 if (!cda[CTA_TIMEOUT])
2236 goto err1;
2237
2238 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
2239 if (timeout > INT_MAX)
2240 timeout = INT_MAX;
2241 ct->timeout = (u32)timeout + nfct_time_stamp;
2242
2243 rcu_read_lock();
2244 if (cda[CTA_HELP]) {
2245 char *helpname = NULL;
2246 struct nlattr *helpinfo = NULL;
2247
2248 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
2249 if (err < 0)
2250 goto err2;
2251
2252 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2253 nf_ct_protonum(ct));
2254 if (helper == NULL) {
2255 rcu_read_unlock();
2256 #ifdef CONFIG_MODULES
2257 if (request_module("nfct-helper-%s", helpname) < 0) {
2258 err = -EOPNOTSUPP;
2259 goto err1;
2260 }
2261
2262 rcu_read_lock();
2263 helper = __nf_conntrack_helper_find(helpname,
2264 nf_ct_l3num(ct),
2265 nf_ct_protonum(ct));
2266 if (helper) {
2267 err = -EAGAIN;
2268 goto err2;
2269 }
2270 rcu_read_unlock();
2271 #endif
2272 err = -EOPNOTSUPP;
2273 goto err1;
2274 } else {
2275 struct nf_conn_help *help;
2276
2277 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
2278 if (help == NULL) {
2279 err = -ENOMEM;
2280 goto err2;
2281 }
2282 /* set private helper data if allowed. */
2283 if (helper->from_nlattr)
2284 helper->from_nlattr(helpinfo, ct);
2285
2286 /* not in hash table yet so not strictly necessary */
2287 RCU_INIT_POINTER(help->helper, helper);
2288 }
2289 } else {
2290 /* try an implicit helper assignment */
2291 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
2292 if (err < 0)
2293 goto err2;
2294 }
2295
2296 err = ctnetlink_setup_nat(ct, cda);
2297 if (err < 0)
2298 goto err2;
2299
2300 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
2301 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
2302 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
2303 nf_ct_labels_ext_add(ct);
2304 nfct_seqadj_ext_add(ct);
2305 nfct_synproxy_ext_add(ct);
2306
2307 /* we must add conntrack extensions before confirmation. */
2308 ct->status |= IPS_CONFIRMED;
2309
2310 if (cda[CTA_STATUS]) {
2311 err = ctnetlink_change_status(ct, cda);
2312 if (err < 0)
2313 goto err2;
2314 }
2315
2316 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2317 err = ctnetlink_change_seq_adj(ct, cda);
2318 if (err < 0)
2319 goto err2;
2320 }
2321
2322 memset(&ct->proto, 0, sizeof(ct->proto));
2323 if (cda[CTA_PROTOINFO]) {
2324 err = ctnetlink_change_protoinfo(ct, cda);
2325 if (err < 0)
2326 goto err2;
2327 }
2328
2329 if (cda[CTA_SYNPROXY]) {
2330 err = ctnetlink_change_synproxy(ct, cda);
2331 if (err < 0)
2332 goto err2;
2333 }
2334
2335 #if defined(CONFIG_NF_CONNTRACK_MARK)
2336 if (cda[CTA_MARK])
2337 ctnetlink_change_mark(ct, cda);
2338 #endif
2339
2340 /* setup master conntrack: this is a confirmed expectation */
2341 if (cda[CTA_TUPLE_MASTER]) {
2342 struct nf_conntrack_tuple master;
2343 struct nf_conntrack_tuple_hash *master_h;
2344 struct nf_conn *master_ct;
2345
2346 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
2347 u3, NULL);
2348 if (err < 0)
2349 goto err2;
2350
2351 master_h = nf_conntrack_find_get(net, zone, &master);
2352 if (master_h == NULL) {
2353 err = -ENOENT;
2354 goto err2;
2355 }
2356 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
2357 __set_bit(IPS_EXPECTED_BIT, &ct->status);
2358 ct->master = master_ct;
2359 }
2360 tstamp = nf_conn_tstamp_find(ct);
2361 if (tstamp)
2362 tstamp->start = ktime_get_real_ns();
2363
2364 err = nf_conntrack_hash_check_insert(ct);
2365 if (err < 0)
2366 goto err2;
2367
2368 rcu_read_unlock();
2369
2370 return ct;
2371
2372 err2:
2373 rcu_read_unlock();
2374 err1:
2375 nf_conntrack_free(ct);
2376 return ERR_PTR(err);
2377 }
2378
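/* IPCTNL_MSG_CT_NEW handler.  If no matching entry exists and NLM_F_CREATE
 * is set, a new conntrack is created from the original and reply tuples;
 * if an entry exists and NLM_F_EXCL is absent, it is updated via
 * ctnetlink_change_conntrack().  Either path reports the resulting events
 * to conntrack event listeners.  A typical create-only request would carry
 * NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK (illustrative
 * only; the exact flag policy is up to userspace).
 */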
2379 static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
2380 struct sk_buff *skb,
2381 const struct nlmsghdr *nlh,
2382 const struct nlattr * const cda[],
2383 struct netlink_ext_ack *extack)
2384 {
2385 struct nf_conntrack_tuple otuple, rtuple;
2386 struct nf_conntrack_tuple_hash *h = NULL;
2387 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2388 struct nf_conn *ct;
2389 u_int8_t u3 = nfmsg->nfgen_family;
2390 struct nf_conntrack_zone zone;
2391 int err;
2392
2393 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
2394 if (err < 0)
2395 return err;
2396
2397 if (cda[CTA_TUPLE_ORIG]) {
2398 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
2399 u3, &zone);
2400 if (err < 0)
2401 return err;
2402 }
2403
2404 if (cda[CTA_TUPLE_REPLY]) {
2405 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
2406 u3, &zone);
2407 if (err < 0)
2408 return err;
2409 }
2410
2411 if (cda[CTA_TUPLE_ORIG])
2412 h = nf_conntrack_find_get(net, &zone, &otuple);
2413 else if (cda[CTA_TUPLE_REPLY])
2414 h = nf_conntrack_find_get(net, &zone, &rtuple);
2415
2416 if (h == NULL) {
2417 err = -ENOENT;
2418 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2419 enum ip_conntrack_events events;
2420
2421 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
2422 return -EINVAL;
2423 if (otuple.dst.protonum != rtuple.dst.protonum)
2424 return -EINVAL;
2425
2426 ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
2427 &rtuple, u3);
2428 if (IS_ERR(ct))
2429 return PTR_ERR(ct);
2430
2431 err = 0;
2432 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
2433 events = 1 << IPCT_RELATED;
2434 else
2435 events = 1 << IPCT_NEW;
2436
2437 if (cda[CTA_LABELS] &&
2438 ctnetlink_attach_labels(ct, cda) == 0)
2439 events |= (1 << IPCT_LABEL);
2440
2441 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2442 (1 << IPCT_ASSURED) |
2443 (1 << IPCT_HELPER) |
2444 (1 << IPCT_PROTOINFO) |
2445 (1 << IPCT_SEQADJ) |
2446 (1 << IPCT_MARK) |
2447 (1 << IPCT_SYNPROXY) |
2448 events,
2449 ct, NETLINK_CB(skb).portid,
2450 nlmsg_report(nlh));
2451 nf_ct_put(ct);
2452 }
2453
2454 return err;
2455 }
2456 /* implicit 'else' */
2457
2458 err = -EEXIST;
2459 ct = nf_ct_tuplehash_to_ctrack(h);
2460 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
2461 err = ctnetlink_change_conntrack(ct, cda);
2462 if (err == 0) {
2463 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2464 (1 << IPCT_ASSURED) |
2465 (1 << IPCT_HELPER) |
2466 (1 << IPCT_LABEL) |
2467 (1 << IPCT_PROTOINFO) |
2468 (1 << IPCT_SEQADJ) |
2469 (1 << IPCT_MARK) |
2470 (1 << IPCT_SYNPROXY),
2471 ct, NETLINK_CB(skb).portid,
2472 nlmsg_report(nlh));
2473 }
2474 }
2475
2476 nf_ct_put(ct);
2477 return err;
2478 }
2479
2480 static int
2481 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2482 __u16 cpu, const struct ip_conntrack_stat *st)
2483 {
2484 struct nlmsghdr *nlh;
2485 struct nfgenmsg *nfmsg;
2486 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2487
2488 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
2489 IPCTNL_MSG_CT_GET_STATS_CPU);
2490 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2491 if (nlh == NULL)
2492 goto nlmsg_failure;
2493
2494 nfmsg = nlmsg_data(nlh);
2495 nfmsg->nfgen_family = AF_UNSPEC;
2496 nfmsg->version = NFNETLINK_V0;
2497 nfmsg->res_id = htons(cpu);
2498
2499 if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
2500 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
2501 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
2502 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
2503 htonl(st->insert_failed)) ||
2504 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
2505 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
2506 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
2507 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
2508 htonl(st->search_restart)) ||
2509 nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE,
2510 htonl(st->clash_resolve)))
2511 goto nla_put_failure;
2512
2513 nlmsg_end(skb, nlh);
2514 return skb->len;
2515
2516 nla_put_failure:
2517 nlmsg_failure:
2518 nlmsg_cancel(skb, nlh);
2519 return -1;
2520 }
2521
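/* Dump per-CPU conntrack statistics, one netlink message per possible CPU.
 * cb->args[0] is the CPU cursor, so a dump that fills the skb resumes at
 * the CPU where it stopped.
 */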
2522 static int
2523 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2524 {
2525 int cpu;
2526 struct net *net = sock_net(skb->sk);
2527
2528 if (cb->args[0] == nr_cpu_ids)
2529 return 0;
2530
2531 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2532 const struct ip_conntrack_stat *st;
2533
2534 if (!cpu_possible(cpu))
2535 continue;
2536
2537 st = per_cpu_ptr(net->ct.stat, cpu);
2538 if (ctnetlink_ct_stat_cpu_fill_info(skb,
2539 NETLINK_CB(cb->skb).portid,
2540 cb->nlh->nlmsg_seq,
2541 cpu, st) < 0)
2542 break;
2543 }
2544 cb->args[0] = cpu;
2545
2546 return skb->len;
2547 }
2548
2549 static int ctnetlink_stat_ct_cpu(struct net *net, struct sock *ctnl,
2550 struct sk_buff *skb,
2551 const struct nlmsghdr *nlh,
2552 const struct nlattr * const cda[],
2553 struct netlink_ext_ack *extack)
2554 {
2555 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2556 struct netlink_dump_control c = {
2557 .dump = ctnetlink_ct_stat_cpu_dump,
2558 };
2559 return netlink_dump_start(ctnl, skb, nlh, &c);
2560 }
2561
2562 return 0;
2563 }
2564
2565 static int
2566 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2567 struct net *net)
2568 {
2569 struct nlmsghdr *nlh;
2570 struct nfgenmsg *nfmsg;
2571 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2572 unsigned int nr_conntracks = atomic_read(&net->ct.count);
2573
2574 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
2575 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2576 if (nlh == NULL)
2577 goto nlmsg_failure;
2578
2579 nfmsg = nlmsg_data(nlh);
2580 nfmsg->nfgen_family = AF_UNSPEC;
2581 nfmsg->version = NFNETLINK_V0;
2582 nfmsg->res_id = 0;
2583
2584 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2585 goto nla_put_failure;
2586
2587 if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2588 goto nla_put_failure;
2589
2590 nlmsg_end(skb, nlh);
2591 return skb->len;
2592
2593 nla_put_failure:
2594 nlmsg_failure:
2595 nlmsg_cancel(skb, nlh);
2596 return -1;
2597 }
2598
2599 static int ctnetlink_stat_ct(struct net *net, struct sock *ctnl,
2600 struct sk_buff *skb, const struct nlmsghdr *nlh,
2601 const struct nlattr * const cda[],
2602 struct netlink_ext_ack *extack)
2603 {
2604 struct sk_buff *skb2;
2605 int err;
2606
2607 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2608 if (skb2 == NULL)
2609 return -ENOMEM;
2610
2611 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
2612 nlh->nlmsg_seq,
2613 NFNL_MSG_TYPE(nlh->nlmsg_type),
2614 sock_net(skb->sk));
2615 if (err <= 0)
2616 goto free;
2617
2618 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2619 if (err < 0)
2620 goto out;
2621
2622 return 0;
2623
2624 free:
2625 kfree_skb(skb2);
2626 out:
2627 /* this avoids a loop in nfnetlink. */
2628 return err == -EAGAIN ? -ENOBUFS : err;
2629 }
2630
2631 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2632 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2633 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2634 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2635 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2636 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2637 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2638 .len = NF_CT_HELPER_NAME_LEN - 1 },
2639 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2640 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2641 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2642 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2643 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2644 };
2645
2646 static struct nf_conntrack_expect *
2647 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2648 struct nf_conntrack_helper *helper,
2649 struct nf_conntrack_tuple *tuple,
2650 struct nf_conntrack_tuple *mask);
2651
2652 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
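/* "Glue" hooks used by other nfnetlink subsystems (e.g. nfnetlink_queue
 * and nfnetlink_log) to embed conntrack state in their own messages and to
 * parse conntrack attributes userspace sends back, without depending on
 * ctnetlink directly.  The functions below build/parse a nested CTA_*
 * attribute set and attach expectations on behalf of the caller.
 */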
2653 static size_t
2654 ctnetlink_glue_build_size(const struct nf_conn *ct)
2655 {
2656 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2657 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2658 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2659 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2660 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2661 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2662 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2663 + nla_total_size(0) /* CTA_PROTOINFO */
2664 + nla_total_size(0) /* CTA_HELP */
2665 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2666 + ctnetlink_secctx_size(ct)
2667 #if IS_ENABLED(CONFIG_NF_NAT)
2668 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2669 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2670 #endif
2671 #ifdef CONFIG_NF_CONNTRACK_MARK
2672 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2673 #endif
2674 #ifdef CONFIG_NF_CONNTRACK_ZONES
2675 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2676 #endif
2677 + ctnetlink_proto_size(ct)
2678 ;
2679 }
2680
2681 static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
2682 enum ip_conntrack_info *ctinfo)
2683 {
2684 return nf_ct_get(skb, ctinfo);
2685 }
2686
2687 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
2688 {
2689 const struct nf_conntrack_zone *zone;
2690 struct nlattr *nest_parms;
2691
2692 zone = nf_ct_zone(ct);
2693
2694 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
2695 if (!nest_parms)
2696 goto nla_put_failure;
2697 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2698 goto nla_put_failure;
2699 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2700 NF_CT_ZONE_DIR_ORIG) < 0)
2701 goto nla_put_failure;
2702 nla_nest_end(skb, nest_parms);
2703
2704 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
2705 if (!nest_parms)
2706 goto nla_put_failure;
2707 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2708 goto nla_put_failure;
2709 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2710 NF_CT_ZONE_DIR_REPL) < 0)
2711 goto nla_put_failure;
2712 nla_nest_end(skb, nest_parms);
2713
2714 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
2715 NF_CT_DEFAULT_ZONE_DIR) < 0)
2716 goto nla_put_failure;
2717
2718 if (ctnetlink_dump_id(skb, ct) < 0)
2719 goto nla_put_failure;
2720
2721 if (ctnetlink_dump_status(skb, ct) < 0)
2722 goto nla_put_failure;
2723
2724 if (ctnetlink_dump_timeout(skb, ct) < 0)
2725 goto nla_put_failure;
2726
2727 if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2728 goto nla_put_failure;
2729
2730 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2731 goto nla_put_failure;
2732
2733 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2734 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2735 goto nla_put_failure;
2736 #endif
2737 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2738 goto nla_put_failure;
2739
2740 if ((ct->status & IPS_SEQ_ADJUST) &&
2741 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2742 goto nla_put_failure;
2743
2744 if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
2745 goto nla_put_failure;
2746
2747 #ifdef CONFIG_NF_CONNTRACK_MARK
2748 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2749 goto nla_put_failure;
2750 #endif
2751 if (ctnetlink_dump_labels(skb, ct) < 0)
2752 goto nla_put_failure;
2753 return 0;
2754
2755 nla_put_failure:
2756 return -ENOSPC;
2757 }
2758
2759 static int
2760 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
2761 enum ip_conntrack_info ctinfo,
2762 u_int16_t ct_attr, u_int16_t ct_info_attr)
2763 {
2764 struct nlattr *nest_parms;
2765
2766 nest_parms = nla_nest_start(skb, ct_attr);
2767 if (!nest_parms)
2768 goto nla_put_failure;
2769
2770 if (__ctnetlink_glue_build(skb, ct) < 0)
2771 goto nla_put_failure;
2772
2773 nla_nest_end(skb, nest_parms);
2774
2775 if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
2776 goto nla_put_failure;
2777
2778 return 0;
2779
2780 nla_put_failure:
2781 return -ENOSPC;
2782 }
2783
2784 static int
2785 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
2786 {
2787 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
2788 unsigned long d = ct->status ^ status;
2789
2790 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
2791 /* SEEN_REPLY bit can only be set */
2792 return -EBUSY;
2793
2794 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
2795 /* ASSURED bit can only be set */
2796 return -EBUSY;
2797
2798 /* This check is less strict than ctnetlink_change_status()
2799 * because callers often flip IPS_EXPECTED bits when sending
2800 * an NFQA_CT attribute to the kernel. So ignore the
2801 * unchangeable bits but do not error out. Also user programs
2802 * are allowed to clear the bits that they are allowed to change.
2803 */
2804 __ctnetlink_change_status(ct, status, ~status);
2805 return 0;
2806 }
2807
2808 static int
2809 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2810 {
2811 int err;
2812
2813 if (cda[CTA_TIMEOUT]) {
2814 err = ctnetlink_change_timeout(ct, cda);
2815 if (err < 0)
2816 return err;
2817 }
2818 if (cda[CTA_STATUS]) {
2819 err = ctnetlink_update_status(ct, cda);
2820 if (err < 0)
2821 return err;
2822 }
2823 if (cda[CTA_HELP]) {
2824 err = ctnetlink_change_helper(ct, cda);
2825 if (err < 0)
2826 return err;
2827 }
2828 if (cda[CTA_LABELS]) {
2829 err = ctnetlink_attach_labels(ct, cda);
2830 if (err < 0)
2831 return err;
2832 }
2833 #if defined(CONFIG_NF_CONNTRACK_MARK)
2834 if (cda[CTA_MARK]) {
2835 ctnetlink_change_mark(ct, cda);
2836 }
2837 #endif
2838 return 0;
2839 }
2840
2841 static int
2842 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
2843 {
2844 struct nlattr *cda[CTA_MAX+1];
2845 int ret;
2846
2847 ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy,
2848 NULL);
2849 if (ret < 0)
2850 return ret;
2851
2852 return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
2853 }
2854
2855 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
2856 const struct nf_conn *ct,
2857 struct nf_conntrack_tuple *tuple,
2858 struct nf_conntrack_tuple *mask)
2859 {
2860 int err;
2861
2862 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2863 nf_ct_l3num(ct), NULL);
2864 if (err < 0)
2865 return err;
2866
2867 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2868 nf_ct_l3num(ct), NULL);
2869 }
2870
2871 static int
2872 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2873 u32 portid, u32 report)
2874 {
2875 struct nlattr *cda[CTA_EXPECT_MAX+1];
2876 struct nf_conntrack_tuple tuple, mask;
2877 struct nf_conntrack_helper *helper = NULL;
2878 struct nf_conntrack_expect *exp;
2879 int err;
2880
2881 err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr,
2882 exp_nla_policy, NULL);
2883 if (err < 0)
2884 return err;
2885
2886 err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
2887 ct, &tuple, &mask);
2888 if (err < 0)
2889 return err;
2890
2891 if (cda[CTA_EXPECT_HELP_NAME]) {
2892 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2893
2894 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2895 nf_ct_protonum(ct));
2896 if (helper == NULL)
2897 return -EOPNOTSUPP;
2898 }
2899
2900 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2901 helper, &tuple, &mask);
2902 if (IS_ERR(exp))
2903 return PTR_ERR(exp);
2904
2905 err = nf_ct_expect_related_report(exp, portid, report, 0);
2906 nf_ct_expect_put(exp);
2907 return err;
2908 }
2909
2910 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
2911 enum ip_conntrack_info ctinfo, int diff)
2912 {
2913 if (!(ct->status & IPS_NAT_MASK))
2914 return;
2915
2916 nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
2917 }
2918
2919 static struct nfnl_ct_hook ctnetlink_glue_hook = {
2920 .get_ct = ctnetlink_glue_get_ct,
2921 .build_size = ctnetlink_glue_build_size,
2922 .build = ctnetlink_glue_build,
2923 .parse = ctnetlink_glue_parse,
2924 .attach_expect = ctnetlink_glue_attach_expect,
2925 .seq_adjust = ctnetlink_glue_seqadj,
2926 };
2927 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
2928
2929 /***********************************************************************
2930 * EXPECT
2931 ***********************************************************************/
2932
2933 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2934 const struct nf_conntrack_tuple *tuple,
2935 u32 type)
2936 {
2937 struct nlattr *nest_parms;
2938
2939 nest_parms = nla_nest_start(skb, type);
2940 if (!nest_parms)
2941 goto nla_put_failure;
2942 if (ctnetlink_dump_tuples(skb, tuple) < 0)
2943 goto nla_put_failure;
2944 nla_nest_end(skb, nest_parms);
2945
2946 return 0;
2947
2948 nla_put_failure:
2949 return -1;
2950 }
2951
2952 static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2953 const struct nf_conntrack_tuple *tuple,
2954 const struct nf_conntrack_tuple_mask *mask)
2955 {
2956 const struct nf_conntrack_l4proto *l4proto;
2957 struct nf_conntrack_tuple m;
2958 struct nlattr *nest_parms;
2959 int ret;
2960
2961 memset(&m, 0xFF, sizeof(m));
2962 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2963 m.src.u.all = mask->src.u.all;
2964 m.src.l3num = tuple->src.l3num;
2965 m.dst.protonum = tuple->dst.protonum;
2966
2967 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
2968 if (!nest_parms)
2969 goto nla_put_failure;
2970
2971 rcu_read_lock();
2972 ret = ctnetlink_dump_tuples_ip(skb, &m);
2973 if (ret >= 0) {
2974 l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
2975 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2976 }
2977 rcu_read_unlock();
2978
2979 if (unlikely(ret < 0))
2980 goto nla_put_failure;
2981
2982 nla_nest_end(skb, nest_parms);
2983
2984 return 0;
2985
2986 nla_put_failure:
2987 return -1;
2988 }
2989
2990 static const union nf_inet_addr any_addr;
2991
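/* Derive the expectation ID reported via CTA_EXPECT_ID.  Instead of
 * exposing the object address to userspace, the address, helper pointer,
 * master pointer and a hash of the tuple are mixed through siphash with a
 * lazily generated random key, which keeps the ID stable for the lifetime
 * of the expectation without leaking kernel pointers.
 */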
2992 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
2993 {
2994 static __read_mostly siphash_key_t exp_id_seed;
2995 unsigned long a, b, c, d;
2996
2997 net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
2998
2999 a = (unsigned long)exp;
3000 b = (unsigned long)exp->helper;
3001 c = (unsigned long)exp->master;
3002 d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
3003
3004 #ifdef CONFIG_64BIT
3005 return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
3006 #else
3007 return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
3008 #endif
3009 }
3010
3011 static int
3012 ctnetlink_exp_dump_expect(struct sk_buff *skb,
3013 const struct nf_conntrack_expect *exp)
3014 {
3015 struct nf_conn *master = exp->master;
3016 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
3017 struct nf_conn_help *help;
3018 #if IS_ENABLED(CONFIG_NF_NAT)
3019 struct nlattr *nest_parms;
3020 struct nf_conntrack_tuple nat_tuple = {};
3021 #endif
3022 struct nf_ct_helper_expectfn *expfn;
3023
3024 if (timeout < 0)
3025 timeout = 0;
3026
3027 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
3028 goto nla_put_failure;
3029 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
3030 goto nla_put_failure;
3031 if (ctnetlink_exp_dump_tuple(skb,
3032 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
3033 CTA_EXPECT_MASTER) < 0)
3034 goto nla_put_failure;
3035
3036 #if IS_ENABLED(CONFIG_NF_NAT)
3037 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
3038 exp->saved_proto.all) {
3039 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
3040 if (!nest_parms)
3041 goto nla_put_failure;
3042
3043 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
3044 goto nla_put_failure;
3045
3046 nat_tuple.src.l3num = nf_ct_l3num(master);
3047 nat_tuple.src.u3 = exp->saved_addr;
3048 nat_tuple.dst.protonum = nf_ct_protonum(master);
3049 nat_tuple.src.u = exp->saved_proto;
3050
3051 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
3052 CTA_EXPECT_NAT_TUPLE) < 0)
3053 goto nla_put_failure;
3054 nla_nest_end(skb, nest_parms);
3055 }
3056 #endif
3057 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
3058 nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
3059 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
3060 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
3061 goto nla_put_failure;
3062 help = nfct_help(master);
3063 if (help) {
3064 struct nf_conntrack_helper *helper;
3065
3066 helper = rcu_dereference(help->helper);
3067 if (helper &&
3068 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
3069 goto nla_put_failure;
3070 }
3071 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
3072 if (expfn != NULL &&
3073 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
3074 goto nla_put_failure;
3075
3076 return 0;
3077
3078 nla_put_failure:
3079 return -1;
3080 }
3081
3082 static int
3083 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
3084 int event, const struct nf_conntrack_expect *exp)
3085 {
3086 struct nlmsghdr *nlh;
3087 struct nfgenmsg *nfmsg;
3088 unsigned int flags = portid ? NLM_F_MULTI : 0;
3089
3090 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
3091 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3092 if (nlh == NULL)
3093 goto nlmsg_failure;
3094
3095 nfmsg = nlmsg_data(nlh);
3096 nfmsg->nfgen_family = exp->tuple.src.l3num;
3097 nfmsg->version = NFNETLINK_V0;
3098 nfmsg->res_id = 0;
3099
3100 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3101 goto nla_put_failure;
3102
3103 nlmsg_end(skb, nlh);
3104 return skb->len;
3105
3106 nlmsg_failure:
3107 nla_put_failure:
3108 nlmsg_cancel(skb, nlh);
3109 return -1;
3110 }
3111
3112 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3113 static int
3114 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
3115 {
3116 struct nf_conntrack_expect *exp = item->exp;
3117 struct net *net = nf_ct_exp_net(exp);
3118 struct nlmsghdr *nlh;
3119 struct nfgenmsg *nfmsg;
3120 struct sk_buff *skb;
3121 unsigned int type, group;
3122 int flags = 0;
3123
3124 if (events & (1 << IPEXP_DESTROY)) {
3125 type = IPCTNL_MSG_EXP_DELETE;
3126 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
3127 } else if (events & (1 << IPEXP_NEW)) {
3128 type = IPCTNL_MSG_EXP_NEW;
3129 flags = NLM_F_CREATE|NLM_F_EXCL;
3130 group = NFNLGRP_CONNTRACK_EXP_NEW;
3131 } else
3132 return 0;
3133
3134 if (!item->report && !nfnetlink_has_listeners(net, group))
3135 return 0;
3136
3137 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3138 if (skb == NULL)
3139 goto errout;
3140
3141 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
3142 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
3143 if (nlh == NULL)
3144 goto nlmsg_failure;
3145
3146 nfmsg = nlmsg_data(nlh);
3147 nfmsg->nfgen_family = exp->tuple.src.l3num;
3148 nfmsg->version = NFNETLINK_V0;
3149 nfmsg->res_id = 0;
3150
3151 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3152 goto nla_put_failure;
3153
3154 nlmsg_end(skb, nlh);
3155 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
3156 return 0;
3157
3158 nla_put_failure:
3159 nlmsg_cancel(skb, nlh);
3160 nlmsg_failure:
3161 kfree_skb(skb);
3162 errout:
3163 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
3164 return 0;
3165 }
3166 #endif
3167 static int ctnetlink_exp_done(struct netlink_callback *cb)
3168 {
3169 if (cb->args[1])
3170 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
3171 return 0;
3172 }
3173
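/* Resumable dump of the global expectation table.  cb->args[0] tracks the
 * hash bucket and cb->args[1] holds a reference to the expectation at
 * which the previous pass ran out of room, so the dump can restart there.
 */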
3174 static int
3175 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3176 {
3177 struct net *net = sock_net(skb->sk);
3178 struct nf_conntrack_expect *exp, *last;
3179 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
3180 u_int8_t l3proto = nfmsg->nfgen_family;
3181
3182 rcu_read_lock();
3183 last = (struct nf_conntrack_expect *)cb->args[1];
3184 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
3185 restart:
3186 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
3187 hnode) {
3188 if (l3proto && exp->tuple.src.l3num != l3proto)
3189 continue;
3190
3191 if (!net_eq(nf_ct_net(exp->master), net))
3192 continue;
3193
3194 if (cb->args[1]) {
3195 if (exp != last)
3196 continue;
3197 cb->args[1] = 0;
3198 }
3199 if (ctnetlink_exp_fill_info(skb,
3200 NETLINK_CB(cb->skb).portid,
3201 cb->nlh->nlmsg_seq,
3202 IPCTNL_MSG_EXP_NEW,
3203 exp) < 0) {
3204 if (!refcount_inc_not_zero(&exp->use))
3205 continue;
3206 cb->args[1] = (unsigned long)exp;
3207 goto out;
3208 }
3209 }
3210 if (cb->args[1]) {
3211 cb->args[1] = 0;
3212 goto restart;
3213 }
3214 }
3215 out:
3216 rcu_read_unlock();
3217 if (last)
3218 nf_ct_expect_put(last);
3219
3220 return skb->len;
3221 }
3222
3223 static int
3224 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3225 {
3226 struct nf_conntrack_expect *exp, *last;
3227 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
3228 struct nf_conn *ct = cb->data;
3229 struct nf_conn_help *help = nfct_help(ct);
3230 u_int8_t l3proto = nfmsg->nfgen_family;
3231
3232 if (cb->args[0])
3233 return 0;
3234
3235 rcu_read_lock();
3236 last = (struct nf_conntrack_expect *)cb->args[1];
3237 restart:
3238 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
3239 if (l3proto && exp->tuple.src.l3num != l3proto)
3240 continue;
3241 if (cb->args[1]) {
3242 if (exp != last)
3243 continue;
3244 cb->args[1] = 0;
3245 }
3246 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
3247 cb->nlh->nlmsg_seq,
3248 IPCTNL_MSG_EXP_NEW,
3249 exp) < 0) {
3250 if (!refcount_inc_not_zero(&exp->use))
3251 continue;
3252 cb->args[1] = (unsigned long)exp;
3253 goto out;
3254 }
3255 }
3256 if (cb->args[1]) {
3257 cb->args[1] = 0;
3258 goto restart;
3259 }
3260 cb->args[0] = 1;
3261 out:
3262 rcu_read_unlock();
3263 if (last)
3264 nf_ct_expect_put(last);
3265
3266 return skb->len;
3267 }
3268
3269 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
3270 struct sk_buff *skb,
3271 const struct nlmsghdr *nlh,
3272 const struct nlattr * const cda[],
3273 struct netlink_ext_ack *extack)
3274 {
3275 int err;
3276 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3277 u_int8_t u3 = nfmsg->nfgen_family;
3278 struct nf_conntrack_tuple tuple;
3279 struct nf_conntrack_tuple_hash *h;
3280 struct nf_conn *ct;
3281 struct nf_conntrack_zone zone;
3282 struct netlink_dump_control c = {
3283 .dump = ctnetlink_exp_ct_dump_table,
3284 .done = ctnetlink_exp_done,
3285 };
3286
3287 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3288 u3, NULL);
3289 if (err < 0)
3290 return err;
3291
3292 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3293 if (err < 0)
3294 return err;
3295
3296 h = nf_conntrack_find_get(net, &zone, &tuple);
3297 if (!h)
3298 return -ENOENT;
3299
3300 ct = nf_ct_tuplehash_to_ctrack(h);
3301 /* No expectations are linked to this conntrack entry. */
3302 if (!nfct_help(ct)) {
3303 nf_ct_put(ct);
3304 return 0;
3305 }
3306
3307 c.data = ct;
3308
3309 err = netlink_dump_start(ctnl, skb, nlh, &c);
3310 nf_ct_put(ct);
3311
3312 return err;
3313 }
3314
3315 static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
3316 struct sk_buff *skb, const struct nlmsghdr *nlh,
3317 const struct nlattr * const cda[],
3318 struct netlink_ext_ack *extack)
3319 {
3320 struct nf_conntrack_tuple tuple;
3321 struct nf_conntrack_expect *exp;
3322 struct sk_buff *skb2;
3323 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3324 u_int8_t u3 = nfmsg->nfgen_family;
3325 struct nf_conntrack_zone zone;
3326 int err;
3327
3328 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3329 if (cda[CTA_EXPECT_MASTER])
3330 return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda,
3331 extack);
3332 else {
3333 struct netlink_dump_control c = {
3334 .dump = ctnetlink_exp_dump_table,
3335 .done = ctnetlink_exp_done,
3336 };
3337 return netlink_dump_start(ctnl, skb, nlh, &c);
3338 }
3339 }
3340
3341 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3342 if (err < 0)
3343 return err;
3344
3345 if (cda[CTA_EXPECT_TUPLE])
3346 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3347 u3, NULL);
3348 else if (cda[CTA_EXPECT_MASTER])
3349 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3350 u3, NULL);
3351 else
3352 return -EINVAL;
3353
3354 if (err < 0)
3355 return err;
3356
3357 exp = nf_ct_expect_find_get(net, &zone, &tuple);
3358 if (!exp)
3359 return -ENOENT;
3360
3361 if (cda[CTA_EXPECT_ID]) {
3362 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3363
3364 if (id != nf_expect_get_id(exp)) {
3365 nf_ct_expect_put(exp);
3366 return -ENOENT;
3367 }
3368 }
3369
3370 err = -ENOMEM;
3371 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3372 if (skb2 == NULL) {
3373 nf_ct_expect_put(exp);
3374 goto out;
3375 }
3376
3377 rcu_read_lock();
3378 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
3379 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
3380 rcu_read_unlock();
3381 nf_ct_expect_put(exp);
3382 if (err <= 0)
3383 goto free;
3384
3385 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
3386 if (err < 0)
3387 goto out;
3388
3389 return 0;
3390
3391 free:
3392 kfree_skb(skb2);
3393 out:
3394 /* this avoids a loop in nfnetlink. */
3395 return err == -EAGAIN ? -ENOBUFS : err;
3396 }
3397
3398 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
3399 {
3400 const struct nf_conn_help *m_help;
3401 const char *name = data;
3402
3403 m_help = nfct_help(exp->master);
3404
3405 return strcmp(m_help->helper->name, name) == 0;
3406 }
3407
3408 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
3409 {
3410 return true;
3411 }
3412
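/* IPCTNL_MSG_EXP_DELETE handler with three modes: delete one expectation
 * by CTA_EXPECT_TUPLE (optionally checked against CTA_EXPECT_ID), delete
 * every expectation belonging to the helper named in CTA_EXPECT_HELP_NAME,
 * or, with neither attribute present, flush all expectations in the
 * network namespace.
 */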
3413 static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
3414 struct sk_buff *skb, const struct nlmsghdr *nlh,
3415 const struct nlattr * const cda[],
3416 struct netlink_ext_ack *extack)
3417 {
3418 struct nf_conntrack_expect *exp;
3419 struct nf_conntrack_tuple tuple;
3420 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3421 u_int8_t u3 = nfmsg->nfgen_family;
3422 struct nf_conntrack_zone zone;
3423 int err;
3424
3425 if (cda[CTA_EXPECT_TUPLE]) {
3426 /* delete a single expect by tuple */
3427 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3428 if (err < 0)
3429 return err;
3430
3431 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3432 u3, NULL);
3433 if (err < 0)
3434 return err;
3435
3436 /* bump usage count to 2 */
3437 exp = nf_ct_expect_find_get(net, &zone, &tuple);
3438 if (!exp)
3439 return -ENOENT;
3440
3441 if (cda[CTA_EXPECT_ID]) {
3442 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3443 if (id != nf_expect_get_id(exp)) {
3444 nf_ct_expect_put(exp);
3445 return -ENOENT;
3446 }
3447 }
3448
3449 /* after list removal, usage count == 1 */
3450 spin_lock_bh(&nf_conntrack_expect_lock);
3451 if (del_timer(&exp->timeout)) {
3452 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
3453 nlmsg_report(nlh));
3454 nf_ct_expect_put(exp);
3455 }
3456 spin_unlock_bh(&nf_conntrack_expect_lock);
3457 /* have to put what we 'get' above.
3458 * after this line usage count == 0 */
3459 nf_ct_expect_put(exp);
3460 } else if (cda[CTA_EXPECT_HELP_NAME]) {
3461 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3462
3463 nf_ct_expect_iterate_net(net, expect_iter_name, name,
3464 NETLINK_CB(skb).portid,
3465 nlmsg_report(nlh));
3466 } else {
3467 /* This basically means we have to flush everything */
3468 nf_ct_expect_iterate_net(net, expect_iter_all, NULL,
3469 NETLINK_CB(skb).portid,
3470 nlmsg_report(nlh));
3471 }
3472
3473 return 0;
3474 }
3475 static int
3476 ctnetlink_change_expect(struct nf_conntrack_expect *x,
3477 const struct nlattr * const cda[])
3478 {
3479 if (cda[CTA_EXPECT_TIMEOUT]) {
3480 if (!del_timer(&x->timeout))
3481 return -ETIME;
3482
3483 x->timeout.expires = jiffies +
3484 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
3485 add_timer(&x->timeout);
3486 }
3487 return 0;
3488 }
3489
3490 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
3491 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
3492 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
3493 };
3494
3495 static int
3496 ctnetlink_parse_expect_nat(const struct nlattr *attr,
3497 struct nf_conntrack_expect *exp,
3498 u_int8_t u3)
3499 {
3500 #if IS_ENABLED(CONFIG_NF_NAT)
3501 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
3502 struct nf_conntrack_tuple nat_tuple = {};
3503 int err;
3504
3505 err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
3506 exp_nat_nla_policy, NULL);
3507 if (err < 0)
3508 return err;
3509
3510 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
3511 return -EINVAL;
3512
3513 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
3514 &nat_tuple, CTA_EXPECT_NAT_TUPLE,
3515 u3, NULL);
3516 if (err < 0)
3517 return err;
3518
3519 exp->saved_addr = nat_tuple.src.u3;
3520 exp->saved_proto = nat_tuple.src.u;
3521 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
3522
3523 return 0;
3524 #else
3525 return -EOPNOTSUPP;
3526 #endif
3527 }
3528
3529 static struct nf_conntrack_expect *
3530 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
3531 struct nf_conntrack_helper *helper,
3532 struct nf_conntrack_tuple *tuple,
3533 struct nf_conntrack_tuple *mask)
3534 {
3535 u_int32_t class = 0;
3536 struct nf_conntrack_expect *exp;
3537 struct nf_conn_help *help;
3538 int err;
3539
3540 help = nfct_help(ct);
3541 if (!help)
3542 return ERR_PTR(-EOPNOTSUPP);
3543
3544 if (cda[CTA_EXPECT_CLASS] && helper) {
3545 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
3546 if (class > helper->expect_class_max)
3547 return ERR_PTR(-EINVAL);
3548 }
3549 exp = nf_ct_expect_alloc(ct);
3550 if (!exp)
3551 return ERR_PTR(-ENOMEM);
3552
3553 if (cda[CTA_EXPECT_FLAGS]) {
3554 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
3555 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
3556 } else {
3557 exp->flags = 0;
3558 }
3559 if (cda[CTA_EXPECT_FN]) {
3560 const char *name = nla_data(cda[CTA_EXPECT_FN]);
3561 struct nf_ct_helper_expectfn *expfn;
3562
3563 expfn = nf_ct_helper_expectfn_find_by_name(name);
3564 if (expfn == NULL) {
3565 err = -EINVAL;
3566 goto err_out;
3567 }
3568 exp->expectfn = expfn->expectfn;
3569 } else
3570 exp->expectfn = NULL;
3571
3572 exp->class = class;
3573 exp->master = ct;
3574 exp->helper = helper;
3575 exp->tuple = *tuple;
3576 exp->mask.src.u3 = mask->src.u3;
3577 exp->mask.src.u.all = mask->src.u.all;
3578
3579 if (cda[CTA_EXPECT_NAT]) {
3580 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
3581 exp, nf_ct_l3num(ct));
3582 if (err < 0)
3583 goto err_out;
3584 }
3585 return exp;
3586 err_out:
3587 nf_ct_expect_put(exp);
3588 return ERR_PTR(err);
3589 }
3590
3591 static int
3592 ctnetlink_create_expect(struct net *net,
3593 const struct nf_conntrack_zone *zone,
3594 const struct nlattr * const cda[],
3595 u_int8_t u3, u32 portid, int report)
3596 {
3597 struct nf_conntrack_tuple tuple, mask, master_tuple;
3598 struct nf_conntrack_tuple_hash *h = NULL;
3599 struct nf_conntrack_helper *helper = NULL;
3600 struct nf_conntrack_expect *exp;
3601 struct nf_conn *ct;
3602 int err;
3603
3604 /* caller guarantees that those three CTA_EXPECT_* exist */
3605 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3606 u3, NULL);
3607 if (err < 0)
3608 return err;
3609 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
3610 u3, NULL);
3611 if (err < 0)
3612 return err;
3613 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
3614 u3, NULL);
3615 if (err < 0)
3616 return err;
3617
3618 /* Look for master conntrack of this expectation */
3619 h = nf_conntrack_find_get(net, zone, &master_tuple);
3620 if (!h)
3621 return -ENOENT;
3622 ct = nf_ct_tuplehash_to_ctrack(h);
3623
3624 rcu_read_lock();
3625 if (cda[CTA_EXPECT_HELP_NAME]) {
3626 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3627
3628 helper = __nf_conntrack_helper_find(helpname, u3,
3629 nf_ct_protonum(ct));
3630 if (helper == NULL) {
3631 rcu_read_unlock();
3632 #ifdef CONFIG_MODULES
3633 if (request_module("nfct-helper-%s", helpname) < 0) {
3634 err = -EOPNOTSUPP;
3635 goto err_ct;
3636 }
3637 rcu_read_lock();
3638 helper = __nf_conntrack_helper_find(helpname, u3,
3639 nf_ct_protonum(ct));
3640 if (helper) {
3641 err = -EAGAIN;
3642 goto err_rcu;
3643 }
3644 rcu_read_unlock();
3645 #endif
3646 err = -EOPNOTSUPP;
3647 goto err_ct;
3648 }
3649 }
3650
3651 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3652 if (IS_ERR(exp)) {
3653 err = PTR_ERR(exp);
3654 goto err_rcu;
3655 }
3656
3657 err = nf_ct_expect_related_report(exp, portid, report, 0);
3658 nf_ct_expect_put(exp);
3659 err_rcu:
3660 rcu_read_unlock();
3661 err_ct:
3662 nf_ct_put(ct);
3663 return err;
3664 }
3665
3666 static int ctnetlink_new_expect(struct net *net, struct sock *ctnl,
3667 struct sk_buff *skb, const struct nlmsghdr *nlh,
3668 const struct nlattr * const cda[],
3669 struct netlink_ext_ack *extack)
3670 {
3671 struct nf_conntrack_tuple tuple;
3672 struct nf_conntrack_expect *exp;
3673 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3674 u_int8_t u3 = nfmsg->nfgen_family;
3675 struct nf_conntrack_zone zone;
3676 int err;
3677
3678 if (!cda[CTA_EXPECT_TUPLE]
3679 || !cda[CTA_EXPECT_MASK]
3680 || !cda[CTA_EXPECT_MASTER])
3681 return -EINVAL;
3682
3683 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3684 if (err < 0)
3685 return err;
3686
3687 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3688 u3, NULL);
3689 if (err < 0)
3690 return err;
3691
3692 spin_lock_bh(&nf_conntrack_expect_lock);
3693 exp = __nf_ct_expect_find(net, &zone, &tuple);
3694 if (!exp) {
3695 spin_unlock_bh(&nf_conntrack_expect_lock);
3696 err = -ENOENT;
3697 if (nlh->nlmsg_flags & NLM_F_CREATE) {
3698 err = ctnetlink_create_expect(net, &zone, cda, u3,
3699 NETLINK_CB(skb).portid,
3700 nlmsg_report(nlh));
3701 }
3702 return err;
3703 }
3704
3705 err = -EEXIST;
3706 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
3707 err = ctnetlink_change_expect(exp, cda);
3708 spin_unlock_bh(&nf_conntrack_expect_lock);
3709
3710 return err;
3711 }
3712
3713 static int
3714 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3715 const struct ip_conntrack_stat *st)
3716 {
3717 struct nlmsghdr *nlh;
3718 struct nfgenmsg *nfmsg;
3719 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3720
3721 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
3722 IPCTNL_MSG_EXP_GET_STATS_CPU);
3723 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3724 if (nlh == NULL)
3725 goto nlmsg_failure;
3726
3727 nfmsg = nlmsg_data(nlh);
3728 nfmsg->nfgen_family = AF_UNSPEC;
3729 nfmsg->version = NFNETLINK_V0;
3730 nfmsg->res_id = htons(cpu);
3731
3732 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3733 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3734 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3735 goto nla_put_failure;
3736
3737 nlmsg_end(skb, nlh);
3738 return skb->len;
3739
3740 nla_put_failure:
3741 nlmsg_failure:
3742 nlmsg_cancel(skb, nlh);
3743 return -1;
3744 }
3745
3746 static int
3747 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3748 {
3749 int cpu;
3750 struct net *net = sock_net(skb->sk);
3751
3752 if (cb->args[0] == nr_cpu_ids)
3753 return 0;
3754
3755 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3756 const struct ip_conntrack_stat *st;
3757
3758 if (!cpu_possible(cpu))
3759 continue;
3760
3761 st = per_cpu_ptr(net->ct.stat, cpu);
3762 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3763 cb->nlh->nlmsg_seq,
3764 cpu, st) < 0)
3765 break;
3766 }
3767 cb->args[0] = cpu;
3768
3769 return skb->len;
3770 }
3771
3772 static int ctnetlink_stat_exp_cpu(struct net *net, struct sock *ctnl,
3773 struct sk_buff *skb,
3774 const struct nlmsghdr *nlh,
3775 const struct nlattr * const cda[],
3776 struct netlink_ext_ack *extack)
3777 {
3778 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3779 struct netlink_dump_control c = {
3780 .dump = ctnetlink_exp_stat_cpu_dump,
3781 };
3782 return netlink_dump_start(ctnl, skb, nlh, &c);
3783 }
3784
3785 return 0;
3786 }
3787
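/* Event notifiers: deliver conntrack and expectation events to netlink
 * listeners when CONFIG_NF_CONNTRACK_EVENTS is enabled.
 */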
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static struct nf_ct_event_notifier ctnl_notifier = {
        .fcn = ctnetlink_conntrack_event,
};

static struct nf_exp_event_notifier ctnl_notifier_exp = {
        .fcn = ctnetlink_expect_event,
};
#endif

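/* Dispatch table mapping conntrack message types to their handlers;
 * entries that carry attributes are validated against ct_nla_policy
 * (up to CTA_MAX).
 */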
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
        [IPCTNL_MSG_CT_NEW]             = { .call = ctnetlink_new_conntrack,
                                            .attr_count = CTA_MAX,
                                            .policy = ct_nla_policy },
        [IPCTNL_MSG_CT_GET]             = { .call = ctnetlink_get_conntrack,
                                            .attr_count = CTA_MAX,
                                            .policy = ct_nla_policy },
        [IPCTNL_MSG_CT_DELETE]          = { .call = ctnetlink_del_conntrack,
                                            .attr_count = CTA_MAX,
                                            .policy = ct_nla_policy },
        [IPCTNL_MSG_CT_GET_CTRZERO]     = { .call = ctnetlink_get_conntrack,
                                            .attr_count = CTA_MAX,
                                            .policy = ct_nla_policy },
        [IPCTNL_MSG_CT_GET_STATS_CPU]   = { .call = ctnetlink_stat_ct_cpu },
        [IPCTNL_MSG_CT_GET_STATS]       = { .call = ctnetlink_stat_ct },
        [IPCTNL_MSG_CT_GET_DYING]       = { .call = ctnetlink_get_ct_dying },
        [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed },
};

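/* Dispatch table for the expectation subsystem; attribute-carrying
 * entries are validated against exp_nla_policy (up to CTA_EXPECT_MAX).
 */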
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
        [IPCTNL_MSG_EXP_GET]            = { .call = ctnetlink_get_expect,
                                            .attr_count = CTA_EXPECT_MAX,
                                            .policy = exp_nla_policy },
        [IPCTNL_MSG_EXP_NEW]            = { .call = ctnetlink_new_expect,
                                            .attr_count = CTA_EXPECT_MAX,
                                            .policy = exp_nla_policy },
        [IPCTNL_MSG_EXP_DELETE]         = { .call = ctnetlink_del_expect,
                                            .attr_count = CTA_EXPECT_MAX,
                                            .policy = exp_nla_policy },
        [IPCTNL_MSG_EXP_GET_STATS_CPU]  = { .call = ctnetlink_stat_exp_cpu },
};

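/* nfnetlink subsystem descriptors binding the two callback tables to the
 * CTNETLINK and CTNETLINK_EXP subsystem IDs.
 */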
static const struct nfnetlink_subsystem ctnl_subsys = {
        .name           = "conntrack",
        .subsys_id      = NFNL_SUBSYS_CTNETLINK,
        .cb_count       = IPCTNL_MSG_MAX,
        .cb             = ctnl_cb,
};

static const struct nfnetlink_subsystem ctnl_exp_subsys = {
        .name           = "conntrack_expect",
        .subsys_id      = NFNL_SUBSYS_CTNETLINK_EXP,
        .cb_count       = IPCTNL_MSG_EXP_MAX,
        .cb             = ctnl_exp_cb,
};

MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);

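/* Per-netns setup: register the conntrack and expectation event
 * notifiers (only when events are compiled in).
 */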
static int __net_init ctnetlink_net_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
        int ret;

        ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
        if (ret < 0) {
                pr_err("ctnetlink_init: cannot register notifier.\n");
                goto err_out;
        }

        ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
        if (ret < 0) {
                pr_err("ctnetlink_init: cannot register expect notifier.\n");
                goto err_unreg_notifier;
        }
#endif
        return 0;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
err_unreg_notifier:
        nf_conntrack_unregister_notifier(net, &ctnl_notifier);
err_out:
        return ret;
#endif
}

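/* Per-netns teardown helper: unregister both event notifiers. */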
static void ctnetlink_net_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
        nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
        nf_conntrack_unregister_notifier(net, &ctnl_notifier);
#endif
}

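/* Batched netns exit: tear down each namespace in the list, then wait
 * for concurrent readers of the notifiers to finish.
 */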
static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
{
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list)
                ctnetlink_net_exit(net);

        /* wait for other cpus until they are done with ctnl_notifiers */
        synchronize_rcu();
}

static struct pernet_operations ctnetlink_net_ops = {
        .init           = ctnetlink_net_init,
        .exit_batch     = ctnetlink_net_exit_batch,
};

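/* Module init: register both nfnetlink subsystems and the pernet
 * operations; optionally wire up the nf_queue glue hook.
 */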
static int __init ctnetlink_init(void)
{
        int ret;

        ret = nfnetlink_subsys_register(&ctnl_subsys);
        if (ret < 0) {
                pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
                goto err_out;
        }

        ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
        if (ret < 0) {
                pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
                goto err_unreg_subsys;
        }

        ret = register_pernet_subsys(&ctnetlink_net_ops);
        if (ret < 0) {
                pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
        /* setup interaction between nf_queue and nf_conntrack_netlink. */
        RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
#endif
        return 0;

err_unreg_exp_subsys:
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
err_unreg_subsys:
        nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
        return ret;
}

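/* Module exit: unregister everything in reverse order, clear the glue
 * hook and synchronize RCU before the module goes away.
 */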
static void __exit ctnetlink_exit(void)
{
        unregister_pernet_subsys(&ctnetlink_net_ops);
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
        nfnetlink_subsys_unregister(&ctnl_subsys);
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
        RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
        synchronize_rcu();
}

module_init(ctnetlink_init);
module_exit(ctnetlink_exit);