#!/bin/sh

# Make sure to kill this script and all of its child processes on exit, since
# any stray console output may interfere with LAVA signal handling, which is
# based on the serial console log.
cleanup() {
  if [ "$BACKGROUND_PIDS" = "" ]; then
    return 0
  fi

  set +x
  echo "Killing all child processes"
  for pid in $BACKGROUND_PIDS
  do
    kill "$pid" 2>/dev/null || true
  done

  # Sleep just a little to give enough time for subprocesses to be gracefully
  # killed. Then apply a SIGKILL if necessary.
  sleep 5
  for pid in $BACKGROUND_PIDS
  do
    kill -9 "$pid" 2>/dev/null || true
  done

  BACKGROUND_PIDS=
  set -x
}
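# Run cleanup on interrupt, termination and normal exit, so no child process
# keeps writing to the console after the job has finished.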
trap cleanup INT TERM EXIT

# Space-separated list of the PIDs of the processes started in the background
# by this script.
BACKGROUND_PIDS=

# Second-stage init, used to set up devices and our job environment before
# running tests.

. /set-job-env-vars.sh

set -ex

# Set up any devices required by the jobs
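# HWCI_KERNEL_MODULES is a comma-separated list of kernel module names; each
# one is loaded with modprobe.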
[ -z "$HWCI_KERNEL_MODULES" ] || {
    echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
}

#
# Load the KVM module specific to the detected CPU virtualization extensions:
# - vmx for Intel VT
# - svm for AMD-V
#
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
#
if [ "$HWCI_KVM" = "true" ]; then
    unset KVM_KERNEL_MODULE
    grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel || {
        grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
    }

    [ -z "${KVM_KERNEL_MODULE}" ] && \
        echo "WARNING: Failed to detect CPU virtualization extensions" || \
        modprobe ${KVM_KERNEL_MODULE}

    mkdir -p /lava-files
    wget -S --progress=dot:giga -O /lava-files/${KERNEL_IMAGE_NAME} \
        "${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}"
fi

# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install.
ln -sf $CI_PROJECT_DIR/install /install
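# Point the dynamic linker and the DRI driver loader at the Mesa build that
# was just linked into /install.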
export LD_LIBRARY_PATH=/install/lib
export LIBGL_DRIVERS_PATH=/install/lib/dri

# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp

# Make sure Python can find all our imports
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")

if [ "$HWCI_FREQ_MAX" = "true" ]; then
  # Ensure initialization of the DRM device (needed by MSM)
  head -0 /dev/dri/renderD128

  # Disable GPU frequency scaling
  DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true)
  test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true

  # Disable CPU frequency scaling
  echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true

  # Disable GPU runtime power management
  GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1)
  test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true

  # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
  # and enable throttling detection & reporting.
  # Additionally, cap the CPU scaling frequency at 65% of the maximum
  # permitted, as a further measure to mitigate thermal throttling.
  ./intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
fi

# Increase freedreno hangcheck timer because it's right at the edge of the
# spilling tests timing out (and some traces, too)
if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
    echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
fi

# Start a little daemon to capture the first devcoredump we encounter.  (They
# expire after 5 minutes, so we poll for them.)
/capture-devcoredump.sh &
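# Remember the daemon's PID so cleanup() can stop it before the artifacts are
# archived.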
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

# If we want Xorg to be running for the test, start it before the
# HWCI_TEST_SCRIPT: we have to use xinit to start X (otherwise, without
# -displayfd, we could race with Xorg's startup), but xinit would eat the
# client's return code, so the test script itself is run separately below.
if [ -n "$HWCI_START_XORG" ]; then
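  # The xinit client below only creates a flag file and then sleeps; the tests
  # connect to this Xorg instance through DISPLAY instead.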
  echo "touch /xorg-started; sleep 100000" > /xorg-script
  env \
    xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
  BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

  # Wait for Xorg to be ready for connections.
  for i in 1 2 3 4 5; do
    if [ -e /xorg-started ]; then
      break
    fi
    sleep 5
  done
  export DISPLAY=:0
fi

RESULT=fail
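# Run the test script with errexit disabled so that its exit code can be
# captured and the artifacts can still be collected on failure.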
set +e
sh -c "$HWCI_TEST_SCRIPT"
EXIT_CODE=$?
set -e

# Make sure the results are always stored in the current working directory.
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true

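# Keep the piglit replay traces only when the run failed.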
[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"

# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
# kill the job.
cleanup

# Upload artifacts.
if [ -n "$MINIO_RESULTS_UPLOAD" ]; then
  tar -czf results.tar.gz results/;
  ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" results.tar.gz https://"$MINIO_RESULTS_UPLOAD"/results.tar.gz;
fi

# We still need to echo the hwci: mesa message, as some scripts rely on it, such
# as the Python ones inside the bare-metal folder.
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass

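# Disable command tracing so the result line below is printed on its own,
# without the xtrace echo of the command.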
set +x
echo "hwci: mesa: $RESULT"
# Sleep a bit to avoid the kernel dump messages interleaving with the LAVA
# ENDTC signal.
sleep 1
exit $EXIT_CODE