name: trunk

# Triggers: pushes to main/release branches and ciflow trunk tags,
# PRs touching the pinned CI scripts, and manual dispatch.
on:
  push:
    branches:
      - main
      - release/*
    tags:
      - ciflow/trunk/*
  pull_request:
    paths:
      - .ci/docker/ci_commit_pins/pytorch.txt
      - .ci/scripts/**
  workflow_dispatch:

# Cancel in-flight runs for the same PR / ref so only the latest run proceeds.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true

jobs:
  # Builds the JSON model/runner matrix consumed by test-models-macos below.
  gather-models:
    runs-on: ubuntu-22.04
    outputs:
      models: ${{ steps.gather-models.outputs.models }}
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: 'false'
      - uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Extract the list of models to test
        id: gather-models
        run: |
          set -eux

          PYTHONPATH="${PWD}" python .ci/scripts/gather_test_models.py --target-os macos --event "${GITHUB_EVENT_NAME}"

39  test-models-macos:
40    name: test-models-macos
41    uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
42    needs: gather-models
43    strategy:
44      matrix: ${{ fromJSON(needs.gather-models.outputs.models) }}
45      fail-fast: false
46    with:
47      runner: ${{ matrix.runner }}
48      python-version: '3.11'
49      submodules: 'true'
50      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
51      timeout: ${{ matrix.timeout }}
52      script: |
53        MODEL_NAME=${{ matrix.model }}
54        BUILD_TOOL=${{ matrix.build-tool }}
55        BACKEND=${{ matrix.backend }}
56        DEMO_BACKEND_DELEGATION=${{ matrix.demo_backend_delegation }}
57
58        bash .ci/scripts/setup-conda.sh
59        # Setup MacOS dependencies as there is no Docker support on MacOS atm
60        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
61        # Build and test executorch
62        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" "${DEMO_BACKEND_DELEGATION}"
63
64  test-custom-ops-macos:
65    name: test-custom-ops-macos
66    uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
67    strategy:
68      matrix:
69        include:
70          - build-tool: cmake
71      fail-fast: false
72    with:
73      runner: macos-m1-stable
74      python-version: '3.11'
75      submodules: 'true'
76      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
77      script: |
78        BUILD_TOOL=${{ matrix.build-tool }}
79
80        bash .ci/scripts/setup-conda.sh
81        # Setup MacOS dependencies as there is no Docker support on MacOS atm
82        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
83        # Build and test custom ops
84        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/portable/custom_ops/test_custom_ops.sh "${BUILD_TOOL}"
85
86  test-selective-build-macos:
87    name: test-selective-build-macos
88    uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
89    strategy:
90      matrix:
91        include:
92          - build-tool: cmake
93      fail-fast: false
94    with:
95      runner: macos-m1-stable
96      python-version: '3.11'
97      submodules: 'true'
98      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
99      script: |
100        BUILD_TOOL=${{ matrix.build-tool }}
101
102        bash .ci/scripts/setup-conda.sh
103        # Setup MacOS dependencies as there is no Docker support on MacOS atm
104        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
105        # Build and test selective build
106        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/selective_build/test_selective_build.sh "${BUILD_TOOL}"
107
108  test-demo-backend-delegation:
109    name: test-demo-backend-delegation
110    uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.5
111    strategy:
112      matrix:
113        include:
114          - build-tool: buck2
115          - build-tool: cmake
116      fail-fast: false
117    with:
118      runner: linux.2xlarge
119      docker-image: executorch-ubuntu-22.04-clang12
120      submodules: 'true'
121      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
122      script: |
123        # The generic Linux job chooses to use base env, not the one setup by the image
124        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
125        conda activate "${CONDA_ENV}"
126
127        BUILD_TOOL=${{ matrix.build-tool }}
128        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}"
129        # Test selective build
130        PYTHON_EXECUTABLE=python bash examples/portable/scripts/test_demo_backend_delegation.sh "${BUILD_TOOL}"
131
132  test-arm-backend-delegation:
133    name: test-arm-backend-delegation
134    uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.5
135    with:
136      runner: linux.2xlarge
137      docker-image: executorch-ubuntu-22.04-arm-sdk
138      submodules: 'true'
139      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
140      timeout: 90
141      script: |
142        # The generic Linux job chooses to use base env, not the one setup by the image
143        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
144        conda activate "${CONDA_ENV}"
145
146        source .ci/scripts/utils.sh
147        install_executorch
148
149        install_arm
150
151        # Increase number of files user can monitor to bypass buck failures.
152        # Hopefully this is high enough for this setup.
153        sudo sysctl fs.inotify.max_user_watches=1048576 # 1024 * 1024
154
155        # Test ethos-u delegate examples with run.sh
156        PYTHON_EXECUTABLE=python bash examples/arm/run.sh examples/arm/ethos-u-scratch/
157
158  test-arm-reference-delegation:
159    name: test-arm-reference-delegation
160    uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.5
161    with:
162      runner: linux.2xlarge
163      docker-image: executorch-ubuntu-22.04-arm-sdk
164      submodules: 'true'
165      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
166      timeout: 90
167      script: |
168        # The generic Linux job chooses to use base env, not the one setup by the image
169        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
170        conda activate "${CONDA_ENV}"
171
172        source .ci/scripts/utils.sh
173        install_executorch
174
175        install_arm
176
177        # Run arm unit tests
178        pytest -c /dev/null -v -n auto --cov=./ --cov-report=xml backends/arm/test
179
180  test-coreml-delegate:
181    name: test-coreml-delegate
182    uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
183    with:
184      runner: macos-13-xlarge
185      python-version: '3.11'
186      submodules: 'true'
187      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
188      timeout: 90
189      script: |
190        BUILD_TOOL=cmake
191
192        bash .ci/scripts/setup-conda.sh
193        # Setup MacOS dependencies as there is no Docker support on MacOS atm
194        GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
195        # Build and test coreml delegate
196        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/build_all.sh
197
198  test-pybind-build-macos:
199    name: test-pybind-build-macos
200    uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
201    strategy:
202      matrix:
203        include:
204          - build-tool: cmake
205      fail-fast: false
206    with:
207      runner: macos-m1-stable
208      python-version: '3.11'
209      submodules: 'true'
210      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
211      timeout: 180
212      script: |
213        bash .ci/scripts/setup-conda.sh
214
215        # build module for executorch.extension.pybindings.portable_lib
216        BUILD_TOOL=${{ matrix.build-tool }}
217        EXECUTORCH_BUILD_PYBIND=ON PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
218
219        # see if we can import the module successfully
220        ${CONDA_RUN} python -c "from executorch.extension.pybindings import portable_lib; print('success!')"
221
222  test-llama-runner-macos:
223    name: test-llama-runner-mac
224    uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
225    strategy:
226      matrix:
227        dtype: [fp32]
228        mode: [portable, xnnpack+kv+custom, mps, coreml]
229        include:
230          - dtype: bf16
231            mode: portable
232          - dtype: bf16
233            mode: custom
234      fail-fast: false
235    with:
236      runner: macos-m1-stable
237      python-version: '3.11'
238      submodules: 'true'
239      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
240      timeout: 900
241      script: |
242
243        DTYPE=${{ matrix.dtype }}
244        MODE=${{ matrix.mode }}
245
246        bash .ci/scripts/setup-conda.sh
247
248        # Setup executorch
249        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh cmake
250
251        if [[ "${MODE}" == "mps" ]]; then
252          # Install mps delegate
253          PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/mps/install_requirements.sh
254          echo "Finishing installing mps."
255        elif [[ "${MODE}" == "coreml" ]]; then
256          # Install coreml delegate
257          PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh
258          echo "Finishing installing coreml."
259        fi
260
261        # Install requirements for export_llama
262        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
263        # Test llama2
264        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh -model stories110M -build_tool cmake -dtype "${DTYPE}" -mode "${MODE}"
265
  # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
  # test-llava-runner-macos:
  #   name: test-llava-runner-macos
  #   uses: pytorch/test-infra/.github/workflows/macos_job.yml@release/2.5
  #   strategy:
  #     fail-fast: false
  #   with:
  #     runner: macos-14-xlarge
  #     python-version: '3.11'
  #     submodules: 'true'
  #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
  #     timeout: 900
  #     script: |
  #       BUILD_TOOL=cmake

  #       bash .ci/scripts/setup-conda.sh
  #       # Setup MacOS dependencies as there is no Docker support on MacOS atm
  #       GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"

  #       # install Llava requirements
  #       ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
  #       ${CONDA_RUN} bash examples/models/llava/install_requirements.sh

  #       # run python unittest
  #       ${CONDA_RUN} python -m unittest examples.models.llava.test.test_llava

  #       # run e2e (export, tokenizer and runner)
  #       PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llava.sh Release

295  test-qnn-model:
296    name: test-qnn-model
297    uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.5
298    strategy:
299      matrix:
300        dtype: [fp32]
301        model: [dl3, mv3, mv2, ic4, ic3, vit]
302      fail-fast: false
303    with:
304      runner: linux.2xlarge
305      docker-image: executorch-ubuntu-22.04-qnn-sdk
306      submodules: 'true'
307      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
308      timeout: 900
309      script: |
310        # The generic Linux job chooses to use base env, not the one setup by the image
311        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
312        conda activate "${CONDA_ENV}"
313        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh cmake
314        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
315        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
316        PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh ${{ matrix.model }} "cmake" "qnn"
317
318  test-apple-model:
319    name: test-apple-model
320    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
321    strategy:
322      fail-fast: false
323    with:
324      runner: macos-m1-stable
325      python-version: '3.11'
326      submodules: 'true'
327      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
328      timeout: 90
329      script: |
330        BUILD_TOOL=cmake
331
332        bash .ci/scripts/setup-conda.sh
333
334        # Setup MacOS dependencies as there is no Docker support on MacOS atm
335        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
336        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh
337        echo "Finishing installing coreml."
338        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/mps/install_requirements.sh
339        echo "Finishing installing mps."
340
341        # Build and test coreml model
342        MODELS=(mv3 ic4 resnet50 edsr mobilebert w2l)
343        for MODEL_NAME in "${MODELS[@]}"; do
344          echo "::group::Exporting coreml model: $MODEL_NAME"
345          PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "coreml"
346          echo "::endgroup::"
347
348          echo "::group::Exporting mps model: $MODEL_NAME"
349          PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "mps"
350          echo "::endgroup::"
351        done
352
353  test-huggingface-transformers:
354    name: test-huggingface-transformers
355    uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.5
356    secrets: inherit
357    strategy:
358      matrix:
359        hf_model_repo: [google/gemma-2b]
360      fail-fast: false
361    with:
362      secrets-env: EXECUTORCH_HF_TOKEN
363      runner: linux.12xlarge
364      docker-image: executorch-ubuntu-22.04-clang12
365      submodules: 'true'
366      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
367      timeout: 90
368      script: |
369        echo "::group::Set up ExecuTorch"
370        # The generic Linux job chooses to use base env, not the one setup by the image
371        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
372        conda activate "${CONDA_ENV}"
373        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh cmake
374
375        echo "Installing libexecutorch.a, libextension_module.so, libportable_ops_lib.a"
376        rm -rf cmake-out
377        cmake \
378            -DCMAKE_INSTALL_PREFIX=cmake-out \
379            -DCMAKE_BUILD_TYPE=Release \
380            -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
381            -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
382            -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
383            -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
384            -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
385            -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
386            -DEXECUTORCH_BUILD_XNNPACK=ON \
387            -DPYTHON_EXECUTABLE=python \
388            -Bcmake-out .
389        cmake --build cmake-out -j9 --target install --config Release
390
391        echo "Build llama runner"
392        dir="examples/models/llama"
393        cmake \
394            -DCMAKE_INSTALL_PREFIX=cmake-out \
395            -DCMAKE_BUILD_TYPE=Release \
396            -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
397            -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
398            -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
399            -DEXECUTORCH_BUILD_XNNPACK=ON \
400            -DPYTHON_EXECUTABLE=python \
401            -Bcmake-out/${dir} \
402            ${dir}
403        cmake --build cmake-out/${dir} -j9 --config Release
404        echo "::endgroup::"
405
406        echo "::group::Set up HuggingFace Dependencies"
407        if [ -z "$SECRET_EXECUTORCH_HF_TOKEN" ]; then
408          echo "::error::SECRET_EXECUTORCH_HF_TOKEN is empty. For security reason secrets won't be accessible on forked PRs. Please make sure you submit a non-forked PR."
409          exit 1
410        fi
411        pip install -U "huggingface_hub[cli]"
412        huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
413        pip install accelerate sentencepiece
414        pip list
415        echo "::endgroup::"
416
417        echo "::group::Export to ExecuTorch"
418        TOKENIZER_FILE=tokenizer.model
419        TOKENIZER_BIN_FILE=tokenizer.bin
420        ET_MODEL_NAME=et_model
421        # Fetch the file using a Python one-liner
422        DOWNLOADED_TOKENIZER_FILE_PATH=$(python -c "
423        from huggingface_hub import hf_hub_download
424        # Download the file from the Hugging Face Hub
425        downloaded_path = hf_hub_download(
426            repo_id='${{ matrix.hf_model_repo }}',
427            filename='${TOKENIZER_FILE}'
428        )
429        print(downloaded_path)
430        ")
431        if [ -f "$DOWNLOADED_TOKENIZER_FILE_PATH" ]; then
432            echo "${TOKENIZER_FILE} downloaded successfully at: $DOWNLOADED_TOKENIZER_FILE_PATH"
433            python -m extension.llm.tokenizer.tokenizer -t $DOWNLOADED_TOKENIZER_FILE_PATH -o ./${TOKENIZER_BIN_FILE}
434            ls ./tokenizer.bin
435        else
436            echo "Failed to download ${TOKENIZER_FILE} from ${{ matrix.hf_model_repo }}."
437            exit 1
438        fi
439
440        python -m extension.export_util.export_hf_model -hfm=${{ matrix.hf_model_repo }} -o ${ET_MODEL_NAME}
441
442        cmake-out/examples/models/llama/llama_main --model_path=${ET_MODEL_NAME}.pte --tokenizer_path=${TOKENIZER_BIN_FILE} --prompt="My name is"
443        echo "::endgroup::"
444