// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
#include <linux/proc_fs.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object; please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
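
/*
 * Worked example (an illustration only, not build-time checked here):
 * on a typical 64-bit build, struct net_generic's header (an unsigned
 * int len plus a struct rcu_head, padded) occupies three void * slots,
 * so MIN_PERNET_OPS_ID evaluates to 3 and ids 0..2 are never handed
 * out; hence the "+1 for len +2 for rcu_head" note below.
 */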

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

DEFINE_COOKIE(net_cookie);

static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see the rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
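
/*
 * For reference, the lockless reader side (a sketch of net_generic()
 * from include/net/netns/generic.h; see that header for the canonical
 * version) pairs with the rcu_assign_pointer() above:
 *
 *	rcu_read_lock();
 *	ng = rcu_dereference(net->gen);
 *	ptr = ng->ptr[id];
 *	rcu_read_unlock();
 *	return ptr;
 */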

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (refcount_read(&net->ns.count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive: this guarantees
	 * we never hash a peer back into net->netns_ids after it
	 * has just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock_bh(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock_bh(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
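
/*
 * Example caller (a sketch modelled on rtnetlink's link dumps, not
 * code from this file): when a netlink attribute must name another
 * namespace, the sender fetches or allocates an nsid relative to the
 * receiving netns:
 *
 *	int id = peernet2id_alloc(net, peer_net, GFP_KERNEL);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 */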

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
}

/* init code that must occur even if setup_net() is not called. */
static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
	refcount_set(&net->passive, 1);
	refcount_set(&net->ns.count, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);
	preinit_net_sysctl(net);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);
	int error = 0;

	preempt_disable();
	net->net_cookie = gen_cookie_next(&net_cookie);
	preempt_enable();

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	synchronize_rcu();

	ops = saved_ops;
	rtnl_lock();
	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static LLIST_HEAD(defer_free_list);

static void net_complete_free(void)
{
	struct llist_node *kill_list;
	struct net *net, *next;

	/* Get the list of namespaces to free from last round. */
	kill_list = llist_del_all(&defer_free_list);

	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
		kmem_cache_free(net_cachep, net);
}

void net_passive_dec(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		/* Wait for an extra rcu_barrier() before final free. */
		llist_add(&net->defer_free_list, &defer_free_list);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_passive_dec(net);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	preinit_net(net, user_ns);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
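
/*
 * Example use (a sketch in the spirit of net-sysfs callers, not code
 * from this file): code creating sysfs objects for a namespace-aware
 * device translates ownership so that root in the owning user
 * namespace can manage the files:
 *
 *	kuid_t uid = GLOBAL_ROOT_UID;
 *	kgid_t gid = GLOBAL_ROOT_GID;
 *
 *	net_ns_get_ownership(dev_net(dev), &uid, &gid);
 *	// ...pass uid/gid to the kobject/sysfs creation path...
 */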

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and this work is the only one that may delete a net from
	 * net_namespace_list. So, while the loop below is executing,
	 * the list may only grow. Thus, we do not need to use
	 * for_each_net_rcu() or net_rwsem.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);

	/* Atomically snapshot the list of namespaces to clean up. */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we release net_rwsem, no net newly
	 * added to net_namespace_list can assign an nsid to a net from
	 * net_kill_list (see peernet2id_alloc()), so we skip those
	 * nets in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links between
	 * net_kill_list's nets, as they have already been deleted from
	 * net_namespace_list. But that would be useless anyway, as
	 * their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/* Run all of the network namespace pre_exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	synchronize_rcu_expedited();

	rtnl_lock();
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	net_complete_free();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_passive_dec(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
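
/*
 * Typical use (a sketch with hypothetical "foo" names, not code from
 * this file): a module whose pernet ->exit callbacks reference module
 * code can flush any in-flight cleanup_net() work on its way out:
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *		net_ns_barrier();	// wait for concurrent cleanup_net()
 *	}
 */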

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	struct fd f = fdget(fd);
	struct net *net = ERR_PTR(-EINVAL);

	if (!fd_file(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			net = get_net(container_of(ns, struct net, ns));
	}
	fdput(f);

	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);

	/* TXRX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18);
}
#endif

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	preinit_net(&init_net, &init_user_ns);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED |
		      RTNL_FLAG_DUMP_UNLOCKED);
}

static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
{
	ops_pre_exit_list(ops, net_exit_list);
	synchronize_rcu();

	if (ops->exit_batch_rtnl) {
		LIST_HEAD(dev_kill_list);

		rtnl_lock();
		ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
		unregister_netdevice_many(&dev_kill_list);
		rtnl_unlock();
	}
	ops_exit_list(ops, net_exit_list);

	ops_free_list(ops, net_exit_list);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold pernet_ops_rwsem write-locked, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* On error, clean up all the namespaces we initialized */
	list_del(&ops->list);
	free_exit_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	free_exit_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		free_exit_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* This does not require READ_ONCE as writers already hold
		 * pernet_ops_rwsem. But WRITE_ONCE is needed to pair with
		 * the READ_ONCE of max_gen_ptrs in net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
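
/*
 * Example registration (a self-contained sketch, not part of this
 * file; the "foo" names are hypothetical): a subsystem keeps per-netns
 * state in the net_generic() area, which ops_init() above sizes and
 * kzalloc()s when .id/.size are set:
 *
 *	struct foo_pernet {
 *		int counter;
 *	};
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_pernet *fp = net_generic(net, foo_net_id);
 *
 *		fp->counter = 0;	// memory was kzalloc'ed by ops_init()
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_exit_net(struct net *net)
 *	{
 *		// release anything foo_init_net() set up; the
 *		// net_generic() area itself is freed by ops_free_list()
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.exit = foo_exit_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_pernet),
 *	};
 *
 *	// module init: register_pernet_subsys(&foo_net_ops);
 *	// module exit: unregister_pernet_subsys(&foo_net_ops);
 */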

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif