From 9d2c36f2fc8ece386e2428ba7c66e3a2fa6bef78 Mon Sep 17 00:00:00 2001
From: Edward Liaw <edliaw@google.com>
Date: Tue, 19 Apr 2022 23:09:36 +0000
Subject: [PATCH 12/24] run vmtests

The hugepage-mmap, hugepage-shm, map_hugetlb, compaction, and
userfaultfd tests cannot be run due to unmet dependencies.

(cherry picked from commit 946413011af990b7220e73af57c0bc2196275524)
Bug: 67017050
Test: make -j vts
---
 tools/testing/selftests/vm/run_vmtests.sh | 424 +++++++++++-----------
 1 file changed, 212 insertions(+), 212 deletions(-)

diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index 71d2dc198fc17..8ea31e76ee9b4 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -8,218 +8,218 @@ ksft_skip=4
 mnt=./huge
 exitcode=0
 
-#get huge pagesize and freepages from /proc/meminfo
-while read name size unit; do
-	if [ "$name" = "HugePages_Free:" ]; then
-		freepgs=$size
-	fi
-	if [ "$name" = "Hugepagesize:" ]; then
-		hpgsize_KB=$size
-	fi
-done < /proc/meminfo
-
-# Simple hugetlbfs tests have a hardcoded minimum requirement of
-# huge pages totaling 256MB (262144KB) in size. The userfaultfd
-# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
-# both of these requirements into account and attempt to increase
-# number of huge pages available.
-nr_cpus=$(nproc)
-hpgsize_MB=$((hpgsize_KB / 1024))
-half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
-needmem_KB=$((half_ufd_size_MB * 2 * 1024))
-
-#set proper nr_hugepages
-if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
-	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
-	needpgs=$((needmem_KB / hpgsize_KB))
-	tries=2
-	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
-		lackpgs=$(( $needpgs - $freepgs ))
-		echo 3 > /proc/sys/vm/drop_caches
-		echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
-		if [ $? -ne 0 ]; then
-			echo "Please run this test as root"
-			exit $ksft_skip
-		fi
-		while read name size unit; do
-			if [ "$name" = "HugePages_Free:" ]; then
-				freepgs=$size
-			fi
-		done < /proc/meminfo
-		tries=$((tries - 1))
-	done
-	if [ $freepgs -lt $needpgs ]; then
-		printf "Not enough huge pages available (%d < %d)\n" \
-			$freepgs $needpgs
-		exit 1
-	fi
-else
-	echo "no hugetlbfs support in kernel?"
-	exit 1
-fi
-
-#filter 64bit architectures
-ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
-if [ -z $ARCH ]; then
-	ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
-fi
-VADDR64=0
-echo "$ARCH64STR" | grep $ARCH && VADDR64=1
-
-mkdir $mnt
-mount -t hugetlbfs none $mnt
-
-echo "---------------------"
-echo "running hugepage-mmap"
-echo "---------------------"
-./hugepage-mmap
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-shmmax=`cat /proc/sys/kernel/shmmax`
-shmall=`cat /proc/sys/kernel/shmall`
-echo 268435456 > /proc/sys/kernel/shmmax
-echo 4194304 > /proc/sys/kernel/shmall
-echo "--------------------"
-echo "running hugepage-shm"
-echo "--------------------"
-./hugepage-shm
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-echo $shmmax > /proc/sys/kernel/shmmax
-echo $shmall > /proc/sys/kernel/shmall
-
-echo "-------------------"
-echo "running map_hugetlb"
-echo "-------------------"
-./map_hugetlb
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "-----------------------"
-echo "running hugepage-mremap"
-echo "-----------------------"
-./hugepage-mremap $mnt/huge_mremap
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-rm -f $mnt/huge_mremap
-
-echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
-echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
-echo "      hugetlb regression testing."
-
-echo "---------------------------"
-echo "running map_fixed_noreplace"
-echo "---------------------------"
-./map_fixed_noreplace
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "------------------------------------------------------"
-echo "running: gup_test -u # get_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -u
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "------------------------------------------------------"
-echo "running: gup_test -a # pin_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -a
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "------------------------------------------------------------"
-echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
-echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
-echo "------------------------------------------------------------"
-./gup_test -ct -F 0x1 0 19 0x1000
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "-------------------"
-echo "running userfaultfd"
-echo "-------------------"
-./userfaultfd anon 20 16
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-echo "---------------------------"
-echo "running userfaultfd_hugetlb"
-echo "---------------------------"
-# Test requires source and destination huge pages. Size of source
-# (half_ufd_size_MB) is passed as argument to test.
-./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-rm -f $mnt/ufd_test_file
-
-echo "-------------------------"
-echo "running userfaultfd_shmem"
-echo "-------------------------"
-./userfaultfd shmem 20 16
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
-
-#cleanup
-umount $mnt
-rm -rf $mnt
-echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
-
-echo "-----------------------"
-echo "running compaction_test"
-echo "-----------------------"
-./compaction_test
-if [ $? -ne 0 ]; then
-	echo "[FAIL]"
-	exitcode=1
-else
-	echo "[PASS]"
-fi
+##get huge pagesize and freepages from /proc/meminfo
+#while read name size unit; do
+#	if [ "$name" = "HugePages_Free:" ]; then
+#		freepgs=$size
+#	fi
+#	if [ "$name" = "Hugepagesize:" ]; then
+#		hpgsize_KB=$size
+#	fi
+#done < /proc/meminfo
+#
+## Simple hugetlbfs tests have a hardcoded minimum requirement of
+## huge pages totaling 256MB (262144KB) in size. The userfaultfd
+## hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
+## both of these requirements into account and attempt to increase
+## number of huge pages available.
+#nr_cpus=$(nproc)
+#hpgsize_MB=$((hpgsize_KB / 1024))
+#half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
+#needmem_KB=$((half_ufd_size_MB * 2 * 1024))
+#
+##set proper nr_hugepages
+#if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
+#	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
+#	needpgs=$((needmem_KB / hpgsize_KB))
+#	tries=2
+#	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
+#		lackpgs=$(( $needpgs - $freepgs ))
+#		echo 3 > /proc/sys/vm/drop_caches
+#		echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
+#		if [ $? -ne 0 ]; then
+#			echo "Please run this test as root"
+#			exit $ksft_skip
+#		fi
+#		while read name size unit; do
+#			if [ "$name" = "HugePages_Free:" ]; then
+#				freepgs=$size
+#			fi
+#		done < /proc/meminfo
+#		tries=$((tries - 1))
+#	done
+#	if [ $freepgs -lt $needpgs ]; then
+#		printf "Not enough huge pages available (%d < %d)\n" \
+#			$freepgs $needpgs
+#		exit 1
+#	fi
+#else
+#	echo "no hugetlbfs support in kernel?"
+#	exit 1
+#fi
+#
+##filter 64bit architectures
+#ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
+#if [ -z $ARCH ]; then
+#	ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
+#fi
+#VADDR64=0
+#echo "$ARCH64STR" | grep $ARCH && VADDR64=1
+#
+#mkdir $mnt
+#mount -t hugetlbfs none $mnt
+#
+#echo "---------------------"
+#echo "running hugepage-mmap"
+#echo "---------------------"
+#./hugepage-mmap
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#shmmax=`cat /proc/sys/kernel/shmmax`
+#shmall=`cat /proc/sys/kernel/shmall`
+#echo 268435456 > /proc/sys/kernel/shmmax
+#echo 4194304 > /proc/sys/kernel/shmall
+#echo "--------------------"
+#echo "running hugepage-shm"
+#echo "--------------------"
+#./hugepage-shm
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#echo $shmmax > /proc/sys/kernel/shmmax
+#echo $shmall > /proc/sys/kernel/shmall
+#
+#echo "-------------------"
+#echo "running map_hugetlb"
+#echo "-------------------"
+#./map_hugetlb
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "-----------------------"
+#echo "running hugepage-mremap"
+#echo "-----------------------"
+#./hugepage-mremap $mnt/huge_mremap
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#rm -f $mnt/huge_mremap
+#
+#echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
+#echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
+#echo "      hugetlb regression testing."
+#
+#echo "---------------------------"
+#echo "running map_fixed_noreplace"
+#echo "---------------------------"
+#./map_fixed_noreplace
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "------------------------------------------------------"
+#echo "running: gup_test -u # get_user_pages_fast() benchmark"
+#echo "------------------------------------------------------"
+#./gup_test -u
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "------------------------------------------------------"
+#echo "running: gup_test -a # pin_user_pages_fast() benchmark"
+#echo "------------------------------------------------------"
+#./gup_test -a
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "------------------------------------------------------------"
+#echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
+#echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
+#echo "------------------------------------------------------------"
+#./gup_test -ct -F 0x1 0 19 0x1000
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "-------------------"
+#echo "running userfaultfd"
+#echo "-------------------"
+#./userfaultfd anon 20 16
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+#echo "---------------------------"
+#echo "running userfaultfd_hugetlb"
+#echo "---------------------------"
+## Test requires source and destination huge pages. Size of source
+## (half_ufd_size_MB) is passed as argument to test.
+#./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#rm -f $mnt/ufd_test_file
+#
+#echo "-------------------------"
+#echo "running userfaultfd_shmem"
+#echo "-------------------------"
+#./userfaultfd shmem 20 16
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
+#
+##cleanup
+#umount $mnt
+#rm -rf $mnt
+#echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
+#
+#echo "-----------------------"
+#echo "running compaction_test"
+#echo "-----------------------"
+#./compaction_test
+#if [ $? -ne 0 ]; then
+#	echo "[FAIL]"
+#	exitcode=1
+#else
+#	echo "[PASS]"
+#fi
 
 echo "----------------------"
 echo "running on-fault-limit"
-- 
2.36.0.550.gb090851708-goog

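For reference, the huge page reservation math in the commented-out preamble
rounds the userfaultfd working set (nr_cpus * hpgsize_MB) up to the next
128MB multiple and then doubles it, since the userfaultfd hugetlb test needs
both source and destination huge pages. The snippet below is a minimal
standalone sketch of that arithmetic; the hard-coded nr_cpus and hpgsize_KB
values are assumptions chosen only to illustrate the computation.

  #!/bin/sh
  # Illustrative sketch of the reservation math from run_vmtests.sh.
  # nr_cpus and hpgsize_KB are hard-coded for the example; the script
  # reads them from nproc and /proc/meminfo instead.
  nr_cpus=8
  hpgsize_KB=2048                 # 2MB huge pages
  hpgsize_MB=$((hpgsize_KB / 1024))

  # Round nr_cpus * hpgsize_MB up to the next multiple of 128MB.
  half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))

  # Double it: the test needs source and destination huge pages.
  needmem_KB=$((half_ufd_size_MB * 2 * 1024))
  needpgs=$((needmem_KB / hpgsize_KB))

  echo "half_ufd_size_MB=$half_ufd_size_MB"   # prints 128
  echo "needpgs=$needpgs"                     # prints 128 (256MB)

With 2MB huge pages and 8 CPUs this works out to 128 huge pages (256MB),
which also covers the 256MB minimum mentioned in the script's comment for
the simple hugetlbfs tests.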