#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please do not use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() walks the net->gen array inside an RCU
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see the rules in netns/generic.h).
	 *
	 * Given that, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
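
/*
 * Illustration only (not part of the original file): the reader side of
 * the scheme above is net_generic() from <net/netns/generic.h>, which
 * dereferences net->gen under RCU and returns the requested slot, e.g.:
 *
 *	struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * This is a hedged sketch; foo_net_id is a hypothetical id filled in by
 * register_pernet_operations() when ops->id and ops->size are set.
 */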

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id, gfp);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net, user_ns);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
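
/*
 * Illustration only, a hedged sketch of common usage: since the function
 * leaves *uid and *gid untouched when the namespace's root has no valid
 * mapping, callers typically seed them with the global root ids first:
 *
 *	kuid_t uid = GLOBAL_ROOT_UID;
 *	kgid_t gid = GLOBAL_ROOT_GID;
 *
 *	net_ns_get_ownership(net, &uid, &gid);
 */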

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and that work is the only process that may delete a net
	 * from net_namespace_list. So, while the loop below is
	 * executing, the list may only grow. Thus, we do not use
	 * for_each_net_rcu() or net_rwsem here.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock rtnl, no new net
	 * added to net_namespace_list can assign an nsid to a net
	 * from net_kill_list (see peernet2id_alloc()), so we skip
	 * them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already been
	 * deleted from net_namespace_list. But this would be
	 * useless anyway, as their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
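
/*
 * Illustration only, a hedged sketch of the intended call site: a module
 * whose pernet ->exit ops must have finished before its code is unloaded
 * can place the barrier in its module exit path, e.g.:
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *		net_ns_barrier();
 *	}
 *
 * foo_module_exit and foo_net_ops are hypothetical names.
 */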

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
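
/*
 * Illustration only: get_net_ns_by_fd() returns a new reference on
 * success, so callers are expected to balance it with put_net(), e.g.:
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (!IS_ERR(net)) {
 *		... use net ...
 *		put_net(net);
 *	}
 */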

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};
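
/*
 * Illustration only, a hedged sketch of the message layout the handlers
 * below expect: an RTM_NEWNSID/RTM_GETNSID request carries a struct
 * rtgenmsg header followed by netlink attributes, e.g. for assigning an
 * nsid:
 *
 *	[ struct nlmsghdr   nlmsg_type = RTM_NEWNSID ]
 *	[ struct rtgenmsg   rtgen_family = AF_UNSPEC ]
 *	[ NETNSA_NSID (s32)         requested id, negative for "any" ]
 *	[ NETNSA_PID or NETNSA_FD (u32)  reference to the peer netns ]
 */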

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, gfp_t gfp)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		/* We hold pernet_ops_rwsem write-locked, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	Upon registration all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
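
/*
 * Illustration only, a hedged sketch of typical usage (all "foo" names
 * are hypothetical). With .id and .size set, ops_init() allocates the
 * per-namespace struct and hooks it up before calling .init, so .init
 * can fetch it with net_generic():
 *
 *	static unsigned int foo_net_id;
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		... set up per-namespace state in fn ...
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_exit_net(struct net *net)
 *	{
 *		... tear down per-namespace state ...
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.exit = foo_exit_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	err = register_pernet_subsys(&foo_net_ops);
 */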

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	Upon registration all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif