#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting

# -u: error on unset variables; -e: abort on unhandled command failures.
set -ue

# Instead of starting one dEQP instance per available CPU core, pour our
# concurrency at llvmpipe threads instead. This is mostly useful for VirGL and
# Venus, which serialise quite a bit at the host level. So instead of smashing
# it with a pile of concurrent jobs which don't actually parallelise very well,
# we use that concurrency for llvmpipe/lavapipe's render pipeline.
if [ -n "${PARALLELISE_VIA_LP_THREADS:-}" ]; then
    export LP_NUM_THREADS="${FDO_CI_CONCURRENT:-4}"
    export FDO_CI_CONCURRENT=1
fi

# If run outside of a deqp-runner invocation (e.g. piglit trace replay), then act
# the same as the first thread in its threadpool.
THREAD=${DEQP_RUNNER_THREAD:-0}
19
#
# Helper to generate CIDs for virtio-vsock based communication with processes
# running inside crosvm guests.
#
# A CID is a 32-bit Context Identifier to be assigned to a crosvm instance
# and must be unique across the host system. For this purpose, let's take
# the least significant 25 bits from CI_JOB_ID as a base and generate a 7-bit
# prefix number to handle up to 128 concurrent crosvm instances per job runner.
#
# As a result, the following variables are set:
#  - VSOCK_CID: the crosvm unique CID to be passed as a run argument
#
#  - VSOCK_STDOUT, VSOCK_STDERR: the port numbers the guest should accept
#    vsock connections on in order to transfer output messages
#
#  - VM_TEMP_DIR: the temporary directory path used to pass additional
#    context data towards the guest
#
# Globals read:  CI_JOB_ID (required), THREAD
# Returns:       0 on success, 1 if the temp dir cannot be created;
#                exits the script entirely when CI_JOB_ID is unset.
#
set_vsock_context() {
    [ -n "${CI_JOB_ID:-}" ] || {
        echo "Missing or unset CI_JOB_ID env variable" >&2
        exit 1
    }

    VM_TEMP_DIR="/tmp-vm.${THREAD}"
    # Clear out any leftover files from a previous run.  Quote the path:
    # it is a single filename, never a word-split list.
    rm -rf "${VM_TEMP_DIR}"
    mkdir "${VM_TEMP_DIR}" || return 1

    # Low 25 bits from the job id, high 7 bits from the runner thread index.
    VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((THREAD & 0x7f) << 25)))
    VSOCK_STDOUT=5001
    VSOCK_STDERR=5002

    return 0
}
55
# The dEQP binary needs to run from the directory it's in, so export its
# location when the first argument looks like a dEQP binary (contains "deqp"
# and is not a .sh wrapper).  Guard with ${1:-} so that running this script
# with no arguments does not trip over `set -u`.
if [ -n "${1:-}" ] && [ -n "${1##*.sh}" ] && [ -z "${1##*"deqp"*}" ]; then
    DEQP_BIN_DIR=$(dirname "$1")
    export DEQP_BIN_DIR
fi
61
# Per-thread control socket for the crosvm instance started below.
VM_SOCKET=crosvm-${THREAD}.sock

# Terminate any existing crosvm, if a previous invocation of this shell script
# was terminated due to timeouts.  This "vm stop" may fail if the crosvm died
# without cleaning itself up.
if [ -e "$VM_SOCKET" ]; then
   crosvm stop "$VM_SOCKET" || true
   # Wait for socats from that invocation to drain
   sleep 5
   # The socket is a single file, not a directory: plain -f is enough.
   rm -f -- "$VM_SOCKET" || true
fi
73
74set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; }
75
76# Securely pass the current variables to the crosvm environment
77echo "Variables passed through:"
78SCRIPTS_DIR=$(readlink -en "${0%/*}")
79${SCRIPTS_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh
80cp ${SCRIPTS_DIR}/setup-test-env.sh ${VM_TEMP_DIR}/setup-test-env.sh
81
82# Set the crosvm-script as the arguments of the current script
83{
84  echo "export SCRIPTS_DIR=${SCRIPTS_DIR}"
85  echo "export RESULTS_DIR=${RESULTS_DIR}"
86  echo ". ${VM_TEMP_DIR}/setup-test-env.sh"
87  echo "$@"
88} > ${VM_TEMP_DIR}/crosvm-script.sh
89
90# Setup networking
91/usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE
92echo 1 > /proc/sys/net/ipv4/ip_forward
93
94# Start background processes to receive output from guest
95socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDERR},retry=200,interval=0.1 stderr &
96socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDOUT},retry=200,interval=0.1 stdout &
97
# Prepare to start crosvm: make sure it does not pick up the CI runner's
# display or runtime dir.
unset DISPLAY
unset XDG_RUNTIME_DIR

# Guest kernel command line: quiet boot, virtiofs root (shared-dir "my_root"
# below), static IP on the 192.168.30.0/24 host<->guest network, and our init
# script with the vsock ports and temp dir as its arguments.
CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPTS_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}"

# Explicit if/else instead of the `[ ] && A || B` pattern: clearer, and safe
# even if the assignment could ever fail.
if [ "${CROSVM_GALLIUM_DRIVER:-}" = "llvmpipe" ]; then
    CROSVM_LIBGL_ALWAYS_SOFTWARE=true
else
    CROSVM_LIBGL_ALWAYS_SOFTWARE=false
fi
107
# From here on, failures are handled explicitly through CROSVM_RET rather
# than aborting the script.
set +e

# Trace commands unless running under deqp-runner (presumably to keep its
# per-test logs free of trace noise — confirm with the runner config).
[ "${INSIDE_DEQP_RUNNER:-}" = "true" ] || set -x
114
115# We aren't testing the host driver here, so we don't need to validate NIR on the host
116NIR_DEBUG="novalidate" \
117LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE:-} \
118GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER:-} \
119VK_DRIVER_FILES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER:-}_icd.x86_64.json \
120crosvm --no-syslog run \
121    --gpu "${CROSVM_GPU_ARGS:-}" --gpu-render-server "path=${VIRGL_RENDER_SERVER:-/usr/local/libexec/virgl_render_server}" \
122    -m "${CROSVM_MEMORY:-4096}" -c "${CROSVM_CPU:-2}" --disable-sandbox \
123    --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
124    --net "host-ip=192.168.30.1,netmask=255.255.255.0,mac=AA:BB:CC:00:00:12" \
125    -s $VM_SOCKET \
126    --cid ${VSOCK_CID} -p "${CROSVM_KERN_ARGS}" \
127    /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VM_TEMP_DIR}/crosvm 2>&1
128
129CROSVM_RET=$?
130
if [ "${CROSVM_RET}" -eq 0 ]; then
    # crosvm exited cleanly; the actual result is the guest script's exit
    # code, read from the shared temp dir (presumably written by
    # crosvm-init.sh inside the guest — confirm there).
    CROSVM_RET=$(cat "${VM_TEMP_DIR}/exit_code" 2>/dev/null)
    # Force an error when the guest exit code is missing, empty, or not a
    # number — a garbage file would otherwise break the -eq tests below.
    case ${CROSVM_RET} in
        ''|*[!0-9]*) CROSVM_RET=1 ;;
    esac
fi

# Show crosvm output on error to help with debugging
if [ "${CROSVM_RET}" -ne 0 ]; then
    # Silence the trace while dumping so the log stays readable.
    { set +x; } 2>/dev/null
    echo "Dumping crosvm output.." >&2
    cat "${VM_TEMP_DIR}/crosvm" >&2
    set -x
fi

exit ${CROSVM_RET}
147