
Searched +full:build +full:- +full:docker +full:- +full:xpu (Results 1 – 18 of 18) sorted by relevance

/external/pytorch/.github/workflows/
xpu.yml
1 name: xpu
6 - ciflow/xpu/*
10-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && git…
11 cancel-in-progress: true
15 get-label-type:
16 name: get-label-type
17 uses: ./.github/workflows/_runner-determinator.yml
24 linux-jammy-xpu-py3_9-build:
25 name: linux-jammy-xpu-py3.9
26 uses: ./.github/workflows/_linux-build.yml
[all …]
_xpu-test.yml
1 # TODO: this looks sort of similar to _linux-test, but there are like a dozen
5 name: xpu-test
10 build-environment:
13 description: Top-level label for what's being built/tested.
14 test-matrix:
18 docker-image:
21 description: Docker image to run in.
22 sync-tag:
28 job with the same `sync-tag` is identical.
29 timeout-minutes:
[all …]
build-manywheel-images.yml
1 name: Build manywheel docker images
7 - main
8 - release/*
10 … # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds
11 # Release candidate tags look like: v1.11.0-rc1
12 - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
14 - '.ci/docker/manywheel/*'
15 - '.ci/docker/manywheel/build_scripts/*'
16 - '.ci/docker/common/*'
17 - .github/workflows/build-manywheel-images.yml
[all …]
docker-builds.yml
1 name: docker-builds
7 - .ci/docker/**
8 - .github/workflows/docker-builds.yml
9 - .lintrunner.toml
12 - main
13 - release/*
14 - landchecks/*
16 - .ci/docker/**
17 - .github/workflows/docker-builds.yml
18 - .lintrunner.toml
[all …]
/external/pytorch/.ci/docker/
README.md
1 # Docker images for GitHub CI and CD
3 This directory contains everything needed to build the Docker images
7 conditionally run build stages depending on build arguments passed to
8 `docker build`. This lets us use only a few Dockerfiles for many
13 See `build.sh` for valid build environments (it's the giant switch).
15 ## Docker CI builds
17 * `build.sh` -- dispatch script to launch all builds
18 * `common` -- scripts used to execute individual Docker build stages
19 * `ubuntu` -- Dockerfile for Ubuntu image for CPU build and test jobs
20 * `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker
[all …]
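
The README excerpt above explains that a handful of Dockerfiles cover many CI configurations because optional stages are gated on arguments passed to `docker build` (the ubuntu-xpu Dockerfile excerpt further down shows the corresponding `RUN if [ -n "${VAR}" ]; ...` pattern). The sketch below illustrates that mechanism from the calling side; it is not `build.sh` itself, and the image tag, Dockerfile path, and build-arg names are assumptions.

```python
# Hypothetical sketch of how a dispatcher script might pass build arguments
# that gate optional stages in a shared Dockerfile. The tag, Dockerfile path,
# and build-arg names below are illustrative, not taken from build.sh.
import subprocess

def build_ci_image(tag: str, dockerfile: str, build_args: dict[str, str]) -> None:
    cmd = ["docker", "build", "-f", dockerfile, "-t", tag]
    for name, value in build_args.items():
        # e.g. TRITON=yes enables the Triton install stage; an empty value leaves it off
        cmd += ["--build-arg", f"{name}={value}"]
    cmd.append(".")
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    build_ci_image(
        "pytorch-ci/ubuntu-xpu:example",               # illustrative tag
        ".ci/docker/ubuntu-xpu/Dockerfile",
        {"TRITON": "yes", "INDUCTOR_BENCHMARKS": ""},  # illustrative build args
    )
```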
build.sh
3 set -ex
8 if [ -z "${image}" ]; then
14 eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?)/ && print \$1")
22 # parts $image into array, splitting on '-'
24 IFS="-"
25 declare -a parts=($image)
30 name=$(echo "${part}" | perl -n -e"/([a-zA-Z]+)\d+(\.\d+)?(\.\d+)?/ && print \$1")
36 # skip non-conforming fields such as "pytorch", "linux" or "bionic" without version string
37 if [ -n "${name}" ]; then
43 # Use the same pre-built XLA test image from PyTorch/XLA
[all …]
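
The `build.sh` excerpt splits the image name on `-` and uses a perl one-liner to pull `<name><version>` fields out of each part, skipping parts such as "pytorch" or "bionic" that carry no version string. Below is a rough Python re-expression of that parsing pattern, not the script's actual logic, with an illustrative image name.

```python
# Illustrative Python re-expression of the image-name parsing shown in
# .ci/docker/build.sh: split on '-', extract "<name><version>" pairs,
# and skip parts without a version string.
import re

def parse_image_name(image: str) -> dict[str, str]:
    fields: dict[str, str] = {}
    for part in image.split("-"):
        m = re.match(r"([a-zA-Z]+)(\d+(?:\.\d+)?(?:\.\d+)?)$", part)
        if m:
            fields[m.group(1)] = m.group(2)
    return fields

# Illustrative image name, not necessarily a real CI tag:
print(parse_image_name("pytorch-linux-jammy-cuda12.4-cudnn9-py3-gcc11"))
# -> {'cuda': '12.4', 'cudnn': '9', 'py': '3', 'gcc': '11'}
```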
/external/pytorch/.ci/docker/manywheel/
build.sh
4 set -eou pipefail
6 TOPDIR=$(git rev-parse --show-toplevel)
11 if [ -z "${image}" ]; then
18 DOCKER_REGISTRY="${DOCKER_REGISTRY:-docker.io}"
20 GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu}
21 GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
22 MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
23 DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
24 WITH_PUSH=${WITH_PUSH:-}
31 DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
[all …]
Dockerfile_2_28
1 # syntax = docker/dockerfile:experimental
7 ENV LC_ALL en_US.UTF-8
8 ENV LANG en_US.UTF-8
9 ENV LANGUAGE en_US.UTF-8
12 RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel yum-util…
13 ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
14 ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${…
16 # cmake-3.18.4 from pip
17 RUN yum install -y python3-pip && \
18 python3 -mpip install cmake==3.18.4 && \
[all …]
/external/pytorch/.github/
merge_rules.yaml
1 - name: ONNX exporter
3 - .ci/caffe2/*
4 - .ci/onnx/*
5 - .ci/docker/common/install_onnx.sh
6 - aten/src/ATen/core/interned_strings.h
7 - benchmarks/dynamo/**
8 - docs/source/onnx.rst
9 - docs/source/onnx*
10 - docs/source/scripts/onnx/**
11 - docs/source/_static/img/onnx/**
[all …]
/external/pytorch/.github/scripts/
build_triton_wheel.py
16 def read_triton_pin(device: str = "cuda") -> str:
18 if device == "xpu":
19 triton_file = "triton-xpu.txt"
20 with open(REPO_DIR / ".ci" / "docker" / "ci_commit_pins" / triton_file) as f:
24 def read_triton_version() -> str:
25 with open(REPO_DIR / ".ci" / "docker" / "triton_version.txt") as f:
29 def check_and_replace(inp: str, src: str, dst: str) -> str:
38 ) -> None:
51 # TODO: remove patch_setup_py() once we have a proper fix for https://github.com/triton-lang/triton…
52 def patch_setup_py(path: Path) -> None:
[all …]
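
Only fragments of `read_triton_pin()` and `read_triton_version()` appear in the excerpt above. A minimal reconstruction consistent with those fragments is sketched below; the `triton.txt` default for non-xpu devices, the whitespace stripping, and the way `REPO_DIR` is resolved are assumptions.

```python
# Minimal sketch consistent with the build_triton_wheel.py excerpt above.
from pathlib import Path

REPO_DIR = Path(".")  # assumption: run from the repository root

def read_triton_pin(device: str = "cuda") -> str:
    triton_file = "triton.txt"          # assumed default pin file for non-xpu devices
    if device == "xpu":
        triton_file = "triton-xpu.txt"  # shown in the excerpt
    with open(REPO_DIR / ".ci" / "docker" / "ci_commit_pins" / triton_file) as f:
        return f.read().strip()         # stripping the trailing newline is an assumption

def read_triton_version() -> str:
    with open(REPO_DIR / ".ci" / "docker" / "triton_version.txt") as f:
        return f.read().strip()
```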
/external/pytorch/.ci/docker/ubuntu-xpu/
Dockerfile
37 COPY requirements-ci.txt requirements-docs.txt /opt/conda/
40 … rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.t…
62 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
65 # Install XPU Dependencies
72 # try to reach out to S3, which docker build runners don't have access
75 COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt
77 RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
78 RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt
83 RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
90 RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
[all …]
/external/pytorch/.circleci/scripts/
binary_linux_test.sh
3 OUTPUT_SCRIPT=${OUTPUT_SCRIPT:-/home/circleci/project/ci_test_script.sh}
6 if [[ -f /home/circleci/project/env ]]; then
10 # =================== The following code will be executed inside Docker container =================…
11 set -eux -o pipefail
18 if [[ -e "${BINARY_ENV_FILE:-/nofile}" ]]; then
19 source "${BINARY_ENV_FILE:-/nofile}"
22 python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"
26 retry conda create -qyn testenv python="$DESIRED_PYTHON"
29 python_path="/opt/python/cp\$python_nodot-cp\${python_nodot}"
31 if [[ -d "\${python_path}/bin" ]]; then
[all …]
/external/pytorch/
README.md
1 ![PyTorch Logo](https://github.com/pytorch/pytorch/raw/main/docs/source/_static/img/pytorch-logo-da…
3 --------------------------------------------------------------------------------
5 PyTorch is a Python package that provides two high-level features:
6 - Tensor computation (like NumPy) with strong GPU acceleration
7 - Deep neural networks built on a tape-based autograd system
13 <!-- toc -->
15 - [More About PyTorch](#more-about-pytorch)
16 - [A GPU-Ready Tensor Library](#a-gpu-ready-tensor-library)
17 - [Dynamic Neural Networks: Tape-Based Autograd](#dynamic-neural-networks-tape-based-autograd)
18 - [Python First](#python-first)
[all …]
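
The README excerpt lists PyTorch's two headline features: NumPy-like tensors with GPU acceleration and networks built on a tape-based autograd system. A tiny example of the latter, assuming `torch` is installed:

```python
# Tiny illustration of tape-based autograd: operations on tensors that require
# gradients are recorded, and backward() replays the tape in reverse.
import torch

x = torch.randn(3, requires_grad=True)
y = (x * x).sum()   # forward pass is recorded
y.backward()        # reverse-mode differentiation
print(x.grad)       # dy/dx = 2 * x
```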
.lintrunner.toml
7 'build/**',
33 '--',
39 '--dry-run={{DRYRUN}}',
41 'flake8-bugbear==23.3.23',
42 'flake8-comprehensions==3.15.0',
43 'flake8-executable==2.1.3',
44 'flake8-logging-format==0.9.0',
45 'flake8-pyi==23.3.1',
46 'flake8-simplify==0.19.3',
59 'aten/src/ATen/xpu/**/*.h',
[all …]
/external/pytorch/.ci/pytorch/
build.sh
3 set -ex
6 # (This is set by default in the Docker images we build, so you don't
11 # shellcheck source=./common-build.sh
12 source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
14 if [[ "$BUILD_ENVIRONMENT" == *-mobile-*build* ]]; then
15 exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile.sh" "$@"
19 python --version
22 gcc --version
25 cmake --version
32 export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
[all …]
test.sh
4 # (This is set by default in the Docker images we build, so you don't
7 set -ex
18 # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96)
19 WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace")
23 echo "For more details refer to https://github.com/sudo-project/sudo/issues/42"
24 sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace
29 sudo chown -R jenkins /var/lib/jenkins/workspace
30 git config --global --add safe.directory /var/lib/jenkins/workspace
36 TORCH_INSTALL_DIR=$(python -c "import site; print(site.getsitepackages()[0])")/torch
41 BUILD_DIR="build"
[all …]
/external/pytorch/.ci/docker/common/
install_conda.sh
3 set -ex
6 if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
8 CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
9 if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
10 BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download"
11 CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
14 MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)
15 MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2)
25 mkdir -p /opt/conda
31 wget -q "${BASE_URL}/${CONDA_FILE}"
[all …]
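
The `install_conda.sh` excerpt switches from the default Miniconda installer to Miniforge when the machine is aarch64 or the build environment contains "xpu". Below is a Python re-expression of that selection, for illustration only; the Miniconda base URL is an assumption, since the excerpt does not show it.

```python
# Illustrative re-expression of the installer selection in install_conda.sh:
# aarch64 machines and *xpu* build environments get Miniforge, others Miniconda.
import platform

def conda_installer_url(build_environment: str) -> str:
    base_url = "https://repo.anaconda.com/miniconda"  # assumed default; not shown in the excerpt
    conda_file = "Miniconda3-latest-Linux-x86_64.sh"
    if platform.machine() == "aarch64" or "xpu" in build_environment:
        base_url = "https://github.com/conda-forge/miniforge/releases/latest/download"
        conda_file = f"Miniforge3-Linux-{platform.machine()}.sh"
    return f"{base_url}/{conda_file}"

print(conda_installer_url("linux-jammy-xpu-py3.9"))
```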
/external/pytorch/test/
Drun_test.py89 # https://github.com/pytorch/pytorch/pull/85770 added file-granularity parallel testing.
95 # Further, ROCm self-hosted runners have up to 4 GPUs.
288 "TEST_REPORT_SOURCE_OVERRIDE": "dist-mpi",
293 "TEST_REPORT_SOURCE_OVERRIDE": "dist-nccl",
298 "TEST_REPORT_SOURCE_OVERRIDE": "dist-gloo",
303 "TEST_REPORT_SOURCE_OVERRIDE": "dist-ucc",
310 # https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
316 Ninja (https://ninja-build.org) is required for some of the C++ extensions
319 `run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
368 executable = ["coverage", "run", "--parallel-mode", "--source=torch"]
[all …]