/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Operations on the network namespace
 */
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>

#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/netfilter.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
#include <net/netns/flow_table.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
#include <net/netns/smc.h>
#include <net/netns/bpf.h>
#include <net/netns/mctp.h>
#include <net/net_trackers.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/xarray.h>

struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
struct bpf_prog;


#define NETDEV_HASHBITS		8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)

struct net {
	/* The first cache line is often dirtied.
	 * Do not place read-mostly fields here.
	 */
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	spinlock_t		rules_mod_lock;

	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	u32			ifindex;

	spinlock_t		nsid_lock;
	atomic_t		fnhe_genid;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* Linked to call pernet exit
						 * methods on a dead net
						 * (pernet_ops_rwsem read locked),
						 * or to unregister pernet ops
						 * (pernet_ops_rwsem write locked).
						 */
	struct llist_node	defer_free_list;
	struct llist_node	cleanup_list;	/* namespaces on death row */

#ifdef CONFIG_KEYS
	struct key_tag		*key_domain;	/* Key domain of operation tag */
#endif
	struct user_namespace	*user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	struct idr		netns_ids;

	struct ns_common	ns;
	struct ref_tracker_dir	refcnt_tracker;
	struct ref_tracker_dir	notrefcnt_tracker; /* tracker for objects not
						    * refcounted against netns
						    */
	struct list_head	dev_base_head;
	struct proc_dir_entry	*proc_net;
	struct proc_dir_entry	*proc_net_stat;

#ifdef CONFIG_SYSCTL
	struct ctl_table_set	sysctls;
#endif

	struct sock		*rtnl;		/* rtnetlink socket */
	struct sock		*genl_sock;

	struct uevent_sock	*uevent_sock;	/* uevent socket */

	struct hlist_head	*dev_name_head;
	struct hlist_head	*dev_index_head;
	struct xarray		dev_by_index;
	struct raw_notifier_head	netdev_chain;

	/* Note that @hash_mix can be read millions of times per second,
	 * so it is critical that it is on a read-mostly cache line.
	 */
	u32			hash_mix;

	struct net_device	*loopback_dev;	/* The loopback */

	/* core fib_rules */
	struct list_head	rules_ops;

	struct netns_core	core;
	struct netns_mib	mib;
	struct netns_packet	packet;
#if IS_ENABLED(CONFIG_UNIX)
	struct netns_unix	unx;
#endif
	struct netns_nexthop	nexthop;
	struct netns_ipv4	ipv4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netns_ipv6	ipv6;
#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
	struct netns_ieee802154_lowpan	ieee802154_lowpan;
#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
	struct netns_sctp	sctp;
#endif
#ifdef CONFIG_NETFILTER
	struct netns_nf		nf;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
	struct netns_nftables	nft;
#endif
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
	struct netns_ft		ft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
	struct sk_buff_head	wext_nlevents;
#endif
	struct net_generic __rcu	*gen;

	/* Used to store attached BPF programs */
	struct netns_bpf	bpf;

	/* Note: following structs are cache line aligned */
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
#endif

	u64			net_cookie;	/* written once */

#if IS_ENABLED(CONFIG_IP_VS)
	struct netns_ipvs	*ipvs;
#endif
#if IS_ENABLED(CONFIG_MPLS)
	struct netns_mpls	mpls;
#endif
#if IS_ENABLED(CONFIG_CAN)
	struct netns_can	can;
#endif
#ifdef CONFIG_XDP_SOCKETS
	struct netns_xdp	xdp;
#endif
#if IS_ENABLED(CONFIG_MCTP)
	struct netns_mctp	mctp;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	struct sock		*crypto_nlsk;
#endif
	struct sock		*diag_nlsk;
#if IS_ENABLED(CONFIG_SMC)
	struct netns_smc	smc;
#endif
} __randomize_layout;

#include <linux/seq_file_net.h>

/* Init's network namespace */
extern struct net init_net;

#ifdef CONFIG_NET_NS
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
			struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);

struct ns_common *get_net_ns(struct ns_common *ns);
struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
static inline struct net *copy_net_ns(unsigned long flags,
	struct user_namespace *user_ns, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}

static inline void net_ns_get_ownership(const struct net *net,
					kuid_t *uid, kgid_t *gid)
{
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}

static inline void net_ns_barrier(void) {}

static inline struct ns_common *get_net_ns(struct ns_common *ns)
{
	return ERR_PTR(-EINVAL);
}

static inline struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_NET_NS */
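
/*
 * Example (illustrative sketch, not part of this header's API): resolving a
 * namespace from a userspace-supplied file descriptor, as setns()-style
 * interfaces do.  "nsfd" is a hypothetical value received from userspace;
 * get_net_ns_by_fd() returns a referenced netns (or an ERR_PTR), which the
 * caller must release with put_net() once done.
 *
 *	struct net *net = get_net_ns_by_fd(nsfd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	// ... operate within "net"; the reference keeps it alive ...
 *	put_net(net);
 */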


extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);

#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
#else
#define ipx_register_sysctl()
#define ipx_unregister_sysctl()
#endif

#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

/* Try using get_net_track() instead */
static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->ns.count);
	return net;
}

static inline struct net *maybe_get_net(struct net *net)
{
	/* Used when we know struct net exists but we
	 * aren't guaranteed a previous reference count
	 * exists.  If the reference count is zero this
	 * function fails and returns NULL.
	 */
	if (!refcount_inc_not_zero(&net->ns.count))
		net = NULL;
	return net;
}

/* Try using put_net_track() instead */
static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->ns.count))
		__put_net(net);
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return refcount_read(&net->ns.count) != 0;
}

void net_drop_ns(void *);
void net_passive_dec(struct net *net);

#else

static inline struct net *get_net(struct net *net)
{
	return net;
}

static inline void put_net(struct net *net)
{
}

static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL

static inline void net_passive_dec(struct net *net)
{
	refcount_dec(&net->passive);
}
#endif
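
/*
 * Example (illustrative sketch): a subsystem object that pins the namespace
 * it was created in.  "struct foo" and its functions are hypothetical.  A
 * plain reference is taken with get_net() while the object lives and dropped
 * with put_net() on teardown; maybe_get_net() is the variant for contexts
 * (e.g. RCU lookups) where the namespace may already be going away and only
 * an opportunistic reference can be taken.
 *
 *	struct foo {
 *		struct net *net;
 *	};
 *
 *	static int foo_create(struct foo *f, struct net *net)
 *	{
 *		f->net = get_net(net);	// pin the namespace
 *		return 0;
 *	}
 *
 *	static void foo_destroy(struct foo *f)
 *	{
 *		put_net(f->net);	// release the pin
 *	}
 */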

static inline void net_passive_inc(struct net *net)
{
	refcount_inc(&net->passive);
}

/* Returns true if the netns initialization completed successfully */
static inline bool net_initialized(const struct net *net)
{
	return READ_ONCE(net->list.next);
}

static inline void __netns_tracker_alloc(struct net *net,
					 netns_tracker *tracker,
					 bool refcounted,
					 gfp_t gfp)
{
#ifdef CONFIG_NET_NS_REFCNT_TRACKER
	ref_tracker_alloc(refcounted ? &net->refcnt_tracker :
				       &net->notrefcnt_tracker,
			  tracker, gfp);
#endif
}

static inline void netns_tracker_alloc(struct net *net, netns_tracker *tracker,
				       gfp_t gfp)
{
	__netns_tracker_alloc(net, tracker, true, gfp);
}

static inline void __netns_tracker_free(struct net *net,
					netns_tracker *tracker,
					bool refcounted)
{
#ifdef CONFIG_NET_NS_REFCNT_TRACKER
	ref_tracker_free(refcounted ? &net->refcnt_tracker :
			 &net->notrefcnt_tracker, tracker);
#endif
}

static inline struct net *get_net_track(struct net *net,
					netns_tracker *tracker, gfp_t gfp)
{
	get_net(net);
	netns_tracker_alloc(net, tracker, gfp);
	return net;
}

static inline void put_net_track(struct net *net, netns_tracker *tracker)
{
	__netns_tracker_free(net, tracker, true);
	put_net(net);
}
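
/*
 * Example (illustrative sketch): the tracked variants pair each long-lived
 * netns reference with a netns_tracker so that leaked references can be
 * pinpointed when CONFIG_NET_NS_REFCNT_TRACKER is enabled.  "struct foo"
 * and its members are hypothetical.
 *
 *	struct foo {
 *		struct net	*net;
 *		netns_tracker	ns_tracker;
 *	};
 *
 *	// acquire: takes a reference and registers the tracker
 *	f->net = get_net_track(net, &f->ns_tracker, GFP_KERNEL);
 *
 *	// release: unregisters the tracker and drops the reference
 *	put_net_track(f->net, &f->ns_tracker);
 */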

typedef struct {
#ifdef CONFIG_NET_NS
	struct net __rcu *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	rcu_assign_pointer(pnet->net, net);
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return rcu_dereference_protected(pnet->net, true);
#else
	return &init_net;
#endif
}

static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return rcu_dereference(pnet->net);
#else
	return &init_net;
#endif
}
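
/*
 * Example (illustrative sketch): objects that may be per-netns embed a
 * possible_net_t so the pointer compiles away when CONFIG_NET_NS=n.
 * "struct foo" is hypothetical; writers publish the namespace with
 * write_pnet(), readers use read_pnet() (or read_pnet_rcu() under
 * rcu_read_lock() when the object itself is RCU-managed).
 *
 *	struct foo {
 *		possible_net_t pnet;
 *	};
 *
 *	static void foo_init(struct foo *f, struct net *net)
 *	{
 *		write_pnet(&f->pnet, net);
 *	}
 *
 *	static struct net *foo_net(const struct foo *f)
 *	{
 *		return read_pnet(&f->pnet);
 *	}
 */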

/* Protected by net_rwsem */
#define for_each_net(VAR)				\
	list_for_each_entry(VAR, &net_namespace_list, list)
#define for_each_net_continue_reverse(VAR)		\
	list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR)				\
	list_for_each_entry_rcu(VAR, &net_namespace_list, list)
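
/*
 * Example (illustrative sketch): walking all namespaces.  for_each_net()
 * must run with net_rwsem held (see the comment above), while
 * for_each_net_rcu() may be used under rcu_read_lock().  "foo_check_net"
 * is a hypothetical per-namespace hook.
 *
 *	struct net *net;
 *
 *	rcu_read_lock();
 *	for_each_net_rcu(net)
 *		foo_check_net(net);
 *	rcu_read_unlock();
 */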

#ifdef CONFIG_NET_NS
#define __net_init
#define __net_exit
#define __net_initdata
#define __net_initconst
#else
#define __net_init	__init
#define __net_exit	__ref
#define __net_initdata	__initdata
#define __net_initconst	__initconst
#endif

int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);
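
/*
 * Example (illustrative sketch): nsids are how one namespace names another,
 * e.g. when a netlink dump must tell userspace that a device's link partner
 * lives in a different netns.  peernet2id_alloc() assigns an id on first
 * use, peernet2id() only looks an existing one up, and get_net_ns_by_id()
 * resolves an id back to a namespace.
 *
 *	int id = peernet2id_alloc(net, peer, GFP_KERNEL);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 */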

struct pernet_operations {
	struct list_head list;
	/*
	 * The methods below are called without any exclusive locks.
	 * More than one net may be constructed and destructed
	 * in parallel on several cpus.  Every pernet_operations
	 * must take all other pernet_operations into account and
	 * introduce locking if they share common resources.
	 *
	 * The only time they are called with an exclusive lock is
	 * from register_pernet_subsys(), unregister_pernet_subsys(),
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods using blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Then, destruction of a group of nets requires a single
	 * synchronize_rcu() related to these pernet_operations,
	 * instead of a separate synchronize_rcu() for every net.
	 * Please avoid synchronize_rcu() altogether where possible.
	 *
	 * Note that a combination of pre_exit() and exit() can
	 * be used, since a synchronize_rcu() is guaranteed between
	 * the calls.
	 */
	int (*init)(struct net *net);
	void (*pre_exit)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	/* The following method is called with the RTNL held. */
	void (*exit_batch_rtnl)(struct list_head *net_exit_list,
				struct list_head *dev_kill_list);
	unsigned int * const id;
	const size_t size;
};

/*
 * Use these carefully.  If you implement a network device and it
 * needs per-network-namespace operations, use device pernet operations;
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called so no new packets will arrive during
 * and after the cleanup functions have been called.  dev_remove_pack
 * is not per namespace, so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device, which caused kernel oopses and panics during network
 * namespace cleanup.  So please don't get this wrong.
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
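
/*
 * Example (illustrative sketch): a minimal pernet subsystem.  The names
 * "foo_net_ops", "foo_net_init" and "foo_net_exit" are hypothetical.  With
 * .id and .size set, the core allocates "size" bytes of zeroed per-netns
 * storage reachable via net_generic(net, *id) (see <net/netns/generic.h>).
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		int some_state;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_state = 0;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		// undo foo_net_init() for this namespace
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	// typically from the module/subsystem init path:
 *	// err = register_pernet_subsys(&foo_net_ops);
 *	// ... and unregister_pernet_subsys(&foo_net_ops) on teardown.
 */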

struct ctl_table;

#define register_net_sysctl(net, path, table)	\
	register_net_sysctl_sz(net, path, table, ARRAY_SIZE(table))
#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path,
						struct ctl_table *table, size_t table_size);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl_sz(struct net *net,
	const char *path, struct ctl_table *table, size_t table_size)
{
	return NULL;
}
static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
{
}
#endif
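
/*
 * Example (illustrative sketch): registering a per-netns sysctl table.  The
 * table, path and data pointer below are hypothetical; real users typically
 * duplicate a template table per namespace and point .data at fields of
 * their per-netns state, then keep the returned header around for
 * unregister_net_sysctl_table() on exit.
 *
 *	static struct ctl_table foo_sysctl_table[] = {
 *		{
 *			.procname	= "foo_enabled",
 *			.data		= &foo_sysctl_value,	// hypothetical int
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *	};
 *
 *	hdr = register_net_sysctl(net, "net/foo", foo_sysctl_table);
 *	if (!hdr)
 *		return -ENOMEM;
 *	// ...
 *	unregister_net_sysctl_table(hdr);
 */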

static inline int rt_genid_ipv4(const struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int rt_genid_ipv6(const struct net *net)
{
	return atomic_read(&net->ipv6.fib6_sernum);
}
#endif

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}

#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}
#endif

/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}
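
/*
 * Example (illustrative sketch): bumping the generation id forces cached
 * routes to be re-validated, which is what callers do after configuration
 * changes that make previously cached routes stale.  "foo_config_changed"
 * is a hypothetical hook.
 *
 *	static void foo_config_changed(struct net *net)
 *	{
 *		// force re-validation of cached IPv4 and IPv6 routes
 *		rt_genid_bump_all(net);
 *	}
 */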

static inline int fnhe_genid(const struct net *net)
{
	return atomic_read(&net->fnhe_genid);
}

static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}

#ifdef CONFIG_NET
void net_ns_init(void);
#else
static inline void net_ns_init(void) {}
#endif

#endif /* __NET_NET_NAMESPACE_H */