#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2019 Petr Vorel <pvorel@suse.cz>
# Copyright (c) 2009 FUJITSU LIMITED
# Author: Li Zefan <lizf@cn.fujitsu.com>

TST_TESTFUNC=test
TST_SETUP=do_setup
TST_CLEANUP=do_cleanup
TST_CNT=10
TST_NEEDS_ROOT=1
TST_NEEDS_TMPDIR=1
TST_NEEDS_CMDS="awk dmesg find mountpoint rmdir"

. cgroup_lib.sh

do_setup()
{
	mkdir cgroup/

	if tst_kvcmp -lt "2.6.29"; then
		tst_brk TCONF "test must be run with kernel 2.6.29 or newer"
	fi

	if [ ! -f /proc/cgroups ]; then
		tst_brk TCONF "kernel does not support control groups"
	fi

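	# Flush the kernel ring buffer and record how many bug indicators
	# are already present, so check_kernel_bug() only reports new ones.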
	dmesg -c > /dev/null
	NR_BUG=`dmesg | grep -c "kernel BUG"`
	NR_NULL=`dmesg | grep -c "kernel NULL pointer dereference"`
	NR_WARNING=`dmesg | grep -c "^WARNING"`
	NR_LOCKDEP=`dmesg | grep -c "possible recursive locking detected"`
}

do_cleanup()
{
	if mountpoint -q cgroup/; then
		find cgroup/ -maxdepth 1 -depth -exec rmdir {} +
		umount cgroup
		rmdir cgroup
	fi
}

check_kernel_bug()
{
	local id="$1"
	local ok_msg="no kernel bug was found"
	local new_bug=`dmesg | grep -c "kernel BUG"`
	local new_null=`dmesg | grep -c "kernel NULL pointer dereference"`
	local new_warning=`dmesg | grep -c "^WARNING"`
	local new_lockdep=`dmesg | grep -c "possible recursive locking detected"`

	[ "$id" ] && ok_msg="$ok_msg for test $id"

	# no kernel bug is detected
	if [ $new_bug -eq $NR_BUG -a $new_warning -eq $NR_WARNING -a \
	     $new_null -eq $NR_NULL -a $new_lockdep -eq $NR_LOCKDEP ]; then
		tst_res TPASS "$ok_msg"
		return 0
	fi

	# some kernel bug is detected
	if [ $new_bug -gt $NR_BUG ]; then
		tst_res TFAIL "kernel BUG was detected!"
	fi
	if [ $new_warning -gt $NR_WARNING ]; then
		tst_res TFAIL "kernel WARNING was detected!"
	fi
	if [ $new_null -gt $NR_NULL ]; then
		tst_res TFAIL "kernel NULL pointer dereference was detected!"
	fi
	if [ $new_lockdep -gt $NR_LOCKDEP ]; then
		tst_res TFAIL "kernel lockdep warning was detected!"
	fi

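	# update the baselines so later tests only report newly found problems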
	NR_BUG=$new_bug
	NR_NULL=$new_null
	NR_WARNING=$new_warning
	NR_LOCKDEP=$new_lockdep

	tst_res TWARN "BUG FOUND!"
	dmesg
	return 1
}

#---------------------------------------------------------------------------
# Bug:    There was a race between repeatedly forking processes and reading
#         /cgroup/tasks at the same time (it must be the very first read of
#         /cgroup/tasks, otherwise the bug is not triggered)
# Kernel: 2.6.24, 2.6.25-rcX
# Links:  http://lkml.org/lkml/2007/10/17/224
#         http://lkml.org/lkml/2008/3/5/332
#         http://lkml.org/lkml/2008/4/16/493
# Fix:    commit 0e04388f0189fa1f6812a8e1cb6172136eada87e
#---------------------------------------------------------------------------
test1()
{
	cgroup_regression_fork_processes &
	sleep 1

	mount -t cgroup -o none,name=foo cgroup cgroup/
	if [ $? -ne 0 ]; then
		tst_res TFAIL "failed to mount cgroup filesystem"
		kill -TERM $!
		return
	fi
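	# the very first read of the tasks file, done while forks are still
	# running, is what used to trigger the race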
	cat cgroup/tasks > /dev/null

	kill -TERM $!
	wait $! 2>/dev/null
	umount cgroup
	check_kernel_bug
}

#---------------------------------------------------------------------------
# Bug:    a cgroup's notify_on_release flag was not inherited from its parent.
# Kernel: 2.6.24-rcX
# Links:  http://lkml.org/lkml/2008/2/25/12
# Fix:    commit bc231d2a048010d5e0b49ac7fddbfa822fc41109
#---------------------------------------------------------------------------
test2()
{
	local val1
	local val2

	mount -t cgroup -o none,name=foo cgroup cgroup/
	if [ $? -ne 0 ]; then
		tst_res TFAIL "Failed to mount cgroup filesystem"
		return
	fi

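	# a new child group must inherit the parent's current
	# notify_on_release setting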
	echo 0 > cgroup/notify_on_release
	mkdir cgroup/0
	val1=`cat cgroup/0/notify_on_release`

	echo 1 > cgroup/notify_on_release
	mkdir cgroup/1
	val2=`cat cgroup/1/notify_on_release`

	if [ $val1 -ne 0 -o $val2 -ne 1 ]; then
		tst_res TFAIL "wrong notify_on_release value"
	else
		tst_res TPASS "notify_on_release is inherited"
	fi

	rmdir cgroup/0 cgroup/1
	umount cgroup
}

#---------------------------------------------------------------------------
# Bug:    Accessing NULL cgrp->dentry when reading /proc/sched_debug
# Kernel: 2.6.26-2.6.28
# Links:  http://lkml.org/lkml/2008/10/30/44
#         http://lkml.org/lkml/2008/12/12/107
#         http://lkml.org/lkml/2008/12/16/481
# Fix:    commit a47295e6bc42ad35f9c15ac66f598aa24debd4e2
#---------------------------------------------------------------------------
test3()
{
	local cpu_subsys_path

	if [ ! -e /proc/sched_debug ]; then
		tst_res TCONF "CONFIG_SCHED_DEBUG is not enabled"
		return
	fi

	if ! grep -q -w "cpu" /proc/cgroups; then
		tst_res TCONF "CONFIG_CGROUP_SCHED is not enabled"
		return
	fi

	cpu_subsys_path=$(get_cgroup_mountpoint "cpu")

	if [ -z "$cpu_subsys_path" ]; then
		mount -t cgroup -o cpu xxx cgroup/
		if [ $? -ne 0 ]; then
			tst_res TFAIL "Failed to mount cpu subsys"
			return
		fi
		cpu_subsys_path=cgroup
	fi

	# run the test for 30 secs
	cgroup_regression_3_1.sh $cpu_subsys_path &
	local pid1=$!
	cgroup_regression_3_2.sh &
	local pid2=$!

	sleep 30
	kill -USR1 $pid1 $pid2
	wait $pid1 2>/dev/null
	wait $pid2 2>/dev/null

	rmdir $cpu_subsys_path/* 2> /dev/null
	umount cgroup 2> /dev/null
	check_kernel_bug
}

#---------------------------------------------------------------------------
# Bug:    cgroup hierarchy lock's lockdep subclass may overflow
# Kernel: 2.6.29-rcX
# Link:   http://lkml.org/lkml/2009/2/4/67
# Fix:
#---------------------------------------------------------------------------
test4()
{
	local lines

	if [ ! -e /proc/lockdep ]; then
		tst_res TCONF "CONFIG_LOCKDEP is not enabled"
		return
	fi

	# MAX_LOCKDEP_SUBCLASSES is 8, so number of subsys should be > 8
	lines=`cat /proc/cgroups | wc -l`
	if [ $lines -le 9 ]; then
		tst_res TCONF "require more than 8 cgroup subsystems"
		return
	fi

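	# create and remove a group in a named hierarchy, then check whether
	# lockdep ran out of subclasses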
	mount -t cgroup -o none,name=foo cgroup cgroup/
	mkdir cgroup/0
	rmdir cgroup/0
	umount cgroup

	if dmesg | grep -q "MAX_LOCKDEP_SUBCLASSES too low"; then
		tst_res TFAIL "lockdep BUG was found"
		return
	fi

	tst_res TPASS "no lockdep BUG was found"
}

#---------------------------------------------------------------------------
# Bug:    When mounting a cgroup fs failed because the fs was busy,
#         root_count should not have been decremented in cgroup_kill_sb()
# Kernel: 2.6.29-rcX
# Links:  https://openvz.org/pipermail/devel/2009-January/016345.html
#         http://lkml.org/lkml/2009/1/28/190
# Fix:    commit 839ec5452ebfd5905b9c69b20ceb640903a8ea1a
#---------------------------------------------------------------------------
test5()
{
	local mounted
	local failing
	local mntpoint

	local lines=`cat /proc/cgroups | wc -l`
	if [ $lines -le 2 ]; then
		tst_res TCONF "require at least 2 cgroup subsystems"
		return
	fi

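	# pick the last two subsystems listed in /proc/cgroups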
	local subsys1=`tail -n 1 /proc/cgroups | awk '{ print $1 }'`
	local subsys2=`tail -n 2 /proc/cgroups | head -1 | awk '{ print $1 }'`

	# The chosen subsystems could already have been mounted at boot
	# time: in such a case we must skip the initial co-mount step
	# (which would fail anyway) and rearrange the $mntpoint and
	# $failing params used in the following expected-to-fail mount.
	# Note that the subsysN name itself will be listed amongst the
	# mount options.
	get_cgroup_mountpoint $subsys1 >/dev/null && mounted=$subsys1
	[ -z "$mounted" ] && get_cgroup_mountpoint $subsys2 >/dev/null && mounted=$subsys2
	if [ -z "$mounted" ]; then
		mntpoint=cgroup
		failing=$subsys1
		mount -t cgroup -o $subsys1,$subsys2 xxx $mntpoint/
		if [ $? -ne 0 ]; then
			tst_res TFAIL "mount $subsys1 and $subsys2 failed"
			return
		fi
	else
		# Use the pre-existing mountpoint as $mntpoint and co-mount
		# with $failing: this way the 2nd mount will also fail (as
		# expected) in this 'mirrored' configuration.
		mntpoint=$(get_cgroup_mountpoint $mounted)
		failing=$subsys1,$subsys2
	fi

	# This 2nd mount has been properly configured to fail
	mount -t cgroup -o $failing xxx $mntpoint/ 2> /dev/null
	if [ $? -eq 0 ]; then
		tst_res TFAIL "mount $failing should fail"
		# Do NOT unmount pre-existing mountpoints...
		[ -z "$mounted" ] && umount $mntpoint
		return
	fi

	mkdir $mntpoint/0
	# Otherwise we can't attach tasks
	if [ "$subsys1" = cpuset -o "$subsys2" = cpuset ]; then
		echo 0 > $mntpoint/0/cpuset.cpus 2> /dev/null
		echo 0 > $mntpoint/0/cpuset.mems 2> /dev/null
	fi

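	# start a background task and attach it to the new group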
	sleep 100 &
	echo $! > $mntpoint/0/tasks

	kill -TERM $! > /dev/null
	wait $! 2>/dev/null
	rmdir $mntpoint/0
	# Do NOT unmount pre-existing mountpoints...
	[ -z "$mounted" ] && umount $mntpoint
	check_kernel_bug
}

#---------------------------------------------------------------------------
# Bug:    There was a race between cgroup_clone and umount
# Kernel: 2.6.24 - 2.6.28, 2.6.29-rcX
# Links:  http://lkml.org/lkml/2008/12/24/124
# Fix:    commit 7b574b7b0124ed344911f5d581e9bc2d83bbeb19
#---------------------------------------------------------------------------
test6()
{
	if tst_kvcmp -ge "3.0"; then
		tst_res TCONF "CONFIG_CGROUP_NS is NOT supported in Kernels >= 3.0"
		return
	fi

	if ! grep -q -w "ns" /proc/cgroups; then
		tst_res TCONF "CONFIG_CGROUP_NS is NOT enabled"
		return
	fi

	cgroup_regression_6_1.sh &
	local pid1=$!
	cgroup_regression_6_2 &
	local pid2=$!

	tst_res TINFO "run test for 30 sec"
	sleep 30
	kill -USR1 $pid1
	kill -TERM $pid2
	wait $pid1 2>/dev/null
	wait $pid2 2>/dev/null

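	# mount the ns hierarchy and remove any groups left behind by the
	# helpers, then check dmesg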
	mount -t cgroup -o ns xxx cgroup/ > /dev/null 2>&1
	rmdir cgroup/[1-9]* > /dev/null 2>&1
	umount cgroup
	check_kernel_bug
}

#---------------------------------------------------------------------------
# Bug:    There was a bug when remounting a cgroup fs with some dead subdirs
#         in it (rmdir()ed but still holding refcnts). It caused a memory
#         leak and could cause an oops when reading /proc/sched_debug.
# Kernel: 2.6.24 - 2.6.27, 2.6.28-rcX
# Links:  http://lkml.org/lkml/2008/12/10/369
# Fix:    commit 307257cf475aac25db30b669987f13d90c934e3a
#---------------------------------------------------------------------------
test_7_1()
{
	local subsys=$1
	# We should be careful to select a $subsys_path which is related to
	# cgroup only: if cgroup debugging is enabled, a 'debug' $subsys
	# could be passed here as a param, which would lead to ambiguity and
	# errors when grepping simply for 'debug' in /proc/mounts, since we
	# would also find /sys/kernel/debug. The helper takes care of this.
	local subsys_path=$(get_cgroup_mountpoint $subsys)

	if [ -z "$subsys_path" ]; then
		mount -t cgroup -o $subsys xxx cgroup/
		if [ $? -ne 0 ]; then
			tst_res TFAIL "failed to mount $subsys"
			return
		fi
		subsys_path=cgroup
	fi

	mkdir $subsys_path/0
	sleep 100 < $subsys_path/0 &	# add refcnt to this dir
	rmdir $subsys_path/0

	# remount with new subsystems added
	# since 2.6.28, this remount will fail

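	# only do the remount and cleanup when we mounted cgroup/ ourselves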
	if [ "$subsys_path" = "cgroup" ]; then
		mount -t cgroup -o remount xxx cgroup/ 2> /dev/null
		kill -TERM $!
		wait $! 2>/dev/null
		umount cgroup
	fi
}

test_7_2()
{
	local subsys=$1

	mount -t cgroup -o none,name=foo cgroup cgroup/
	if [ $? -ne 0 ]; then
		tst_res TFAIL "failed to mount cgroup"
		return
	fi

	mkdir cgroup/0
	sleep 100 < cgroup/0 &	# add refcnt to this dir
	rmdir cgroup/0

	# remount with some subsystems removed
	# since 2.6.28, this remount will fail
	mount -t cgroup -o remount,$subsys xxx cgroup/ 2> /dev/null
	kill -TERM $!
	wait $! 2>/dev/null
	umount cgroup

	grep -q -w "cpu" /proc/cgroups
	if [ $? -ne 0 -o ! -e /proc/sched_debug ]; then
		tst_res TWARN "skip rest of testing due to possible oops triggered by reading /proc/sched_debug"
		return
	fi

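	# repeatedly drop caches and read /proc/sched_debug to try to
	# trigger the oops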
	tmp=0
	while [ $tmp -lt 50 ]; do
		echo 3 > /proc/sys/vm/drop_caches
		cat /proc/sched_debug > /dev/null
		tmp=$((tmp+1))
	done
}

test7()
{
	local lines=`cat /proc/cgroups | wc -l`
	local subsys
	local i=1

	if [ $lines -le 2 ]; then
		tst_res TCONF "require at least 2 cgroup subsystems"
		return
	fi

	subsys=`tail -n 1 /proc/cgroups | awk '{ print $1 }'`

	# remount to add new subsystems to the hierarchy
	while [ $i -le 2 ]; do
		test_7_$i $subsys || return
		check_kernel_bug $i || return
		i=$((i+1))
	done
}

#---------------------------------------------------------------------------
# Bug:    oops when getting cgroupstat of a cgroup control file
# Kernel: 2.6.24 - 2.6.27, 2.6.28-rcX
# Links:  http://lkml.org/lkml/2008/11/19/53
# Fix:    commit 33d283bef23132c48195eafc21449f8ba88fce6b
#---------------------------------------------------------------------------
test8()
{
	mount -t cgroup -o none,name=foo cgroup cgroup/
	if [ $? -ne 0 ]; then
		tst_res TFAIL "failed to mount cgroup filesystem"
		return
	fi

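	# requesting cgroupstat of a control file (not a directory) must fail
	# without oopsing the kernel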
	if cgroup_regression_getdelays -C cgroup/tasks > /dev/null 2>&1; then
		tst_res TFAIL "should have failed to get cgroupstat of tasks file"
	fi

	umount cgroup
	check_kernel_bug
}

#---------------------------------------------------------------------------
# Bug:    When running 2 concurrent mount/umount threads, a lockdep warning
#         may be triggered. It is a false positive, and a VFS issue rather
#         than a cgroup one.
# Kernel: 2.6.24 - 2.6.29-rcX
# Links:  http://lkml.org/lkml/2009/1/4/352
# Fix:    commit ada723dcd681e2dffd7d73345cc8fda0eb0df9bd
#---------------------------------------------------------------------------
test9()
{
	cgroup_regression_9_1.sh &
	local pid1=$!
	cgroup_regression_9_2.sh &
	local pid2=$!

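	# let the two mount/umount helpers race for 30 seconds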
	sleep 30
	kill -USR1 $pid1 $pid2
	wait $pid1 2>/dev/null
	wait $pid2 2>/dev/null

	umount cgroup 2> /dev/null
	check_kernel_bug
}

#---------------------------------------------------------------------------
# Bug:    When running 2 concurrent mount/umount threads, a kernel WARNING
#         may be triggered, but it is a VFS issue rather than a cgroup one.
# Kernel: 2.6.24 - 2.6.29-rcX
# Links:  http://lkml.org/lkml/2009/1/4/354
# Fix:    commit 1a88b5364b535edaa321d70a566e358390ff0872
#---------------------------------------------------------------------------
test10()
{
	cgroup_regression_10_1.sh &
	local pid1=$!
	cgroup_regression_10_2.sh &
	local pid2=$!

	sleep 30
	kill -USR1 $pid1 $pid2
	wait $pid1 2>/dev/null
	wait $pid2 2>/dev/null

	mount -t cgroup none cgroup 2> /dev/null
	mkdir cgroup/0
	rmdir cgroup/0
	umount cgroup 2> /dev/null
	check_kernel_bug
}

tst_run