Searched +full:docker +full:- +full:build +full:- +full:dir (Results 1 – 25 of 177) sorted by relevance

/external/pytorch/.github/workflows/

D | build-manywheel-images.yml
     1  name: Build manywheel docker images
     7  - main
     8  - release/*
    10  … # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds
    11  # Release candidate tags look like: v1.11.0-rc1
    12  - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
    14  - '.ci/docker/manywheel/*'
    15  - '.ci/docker/manywheel/build_scripts/*'
    16  - '.ci/docker/common/*'
    17  - .github/workflows/build-manywheel-images.yml
        [all …]

D | build-libtorch-images.yml
     1  name: Build libtorch docker images
     6  - main
     7  - release/*
     9  … # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds
    10  # Release candidate tags look like: v1.11.0-rc1
    11  - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
    13  - '.ci/docker/libtorch/*'
    14  - '.ci/docker/common/*'
    15  - .github/workflows/build-libtorch-images.yml
    18  - '.ci/docker/libtorch/*'
        [all …]

D | build-conda-images.yml
     1  name: Build conda docker images
     7  - main
     8  - release/*
    10  … # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds
    11  # Release candidate tags look like: v1.11.0-rc1
    12  - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
    14  - '.ci/docker/conda/*'
    15  - '.ci/docker/common/*'
    16  - .github/workflows/build-conda-images.yml
    19  - '.ci/docker/conda/*'
        [all …]

D | _binary-build-linux.yml
     1  name: linux-binary-build
     9  description: The build's name
    13  description: The build environment
    18  description: Hardware to run this "build"job on, linux.12xlarge or linux.arm64.2xlarge.
    19  timeout-minutes:
    27  default: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
    56  description: Docker image to use
    79  github-token:
    84  build:
    85  runs-on: ${{ inputs.runs_on }}
        [all …]

/external/coreboot/util/docker/

D | Makefile
     2  ## SPDX-License-Identifier: GPL-2.0-only
     5  export crossgcc_version=$(shell $(top)/util/crossgcc/buildgcc --version | grep 'cross toolchain' | …
     6  export DOCKER:=$(shell $(SHELL) -c "command -v docker")
    11  # Local cache directory - for storing files shared with the docker image
    12  export COREBOOT_JENKINS_CACHE_DIR?=/srv/docker/coreboot-builder/cache
    20  # Commit id to build from
    21  export DOCKER_COMMIT?=$(shell git log -n 1 --pretty=%h)
    23  # .ccache dir to use
    29  UID ?= $(shell id -u)
    30  GID ?= $(shell id -g)
        [all …]

/external/skia/infra/wasm-common/

D | Makefile
     2  # Set the build context to the current work dir, so we can copy
     4  docker build -t gold-karma-chrome-tests -f ./docker/gold-karma-chrome-tests/Dockerfile .
     7  # Set the build context to the current work dir, so we can copy
     9  docker build -t perf-karma-chrome-tests -f ./docker/perf-karma-chrome-tests/Dockerfile .
    12  mkdir -p ./tmp
    13  CGO_ENABLED=0 GOOS=linux go build -o ./tmp/gold-aggregator -a ./gold/
    14  mkdir -p ./tmp
    15  CGO_ENABLED=0 GOOS=linux go build -o ./tmp/perf-aggregator -a ./perf/
    18  # docker run karma-chrome-tests /usr/bin/google-chrome-stable --version
    22  docker tag gold-karma-chrome-tests gcr.io/skia-public/gold-karma-chrome-tests:${CHROME_VERSION}
        [all …]
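
The targets above follow a repeated pattern: compile a static Go aggregator binary, then docker build with the repository root as the build context. A quick local check of the resulting image, assuming the image names from the excerpt; the version-check command mirrors the comment at line 18, substituting the gold image built at line 4:

    # Build the test image with the repo root as the build context.
    docker build -t gold-karma-chrome-tests -f ./docker/gold-karma-chrome-tests/Dockerfile .
    # Sanity-check which Chrome version the image pins.
    docker run gold-karma-chrome-tests /usr/bin/google-chrome-stable --version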

/external/python/google-auth-library-python/.kokoro/

D | trampoline_v2.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    20  # 1. Prepare the Docker image for the test
    21  # 2. Run the Docker with appropriate flags to run the test
    22  # 3. Upload the newly built Docker image
    29  # gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.j…
    30  # gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
    36  # TRAMPOLINE_IMAGE: The docker image to use.
    41  # (true|false): Whether to upload the Docker image after the
    43  # TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
    44  # TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
        [all …]
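
The header quoted above documents the trampoline's contract through environment variables (the same file appears twice more below, vendored into two other Python repos). A minimal sketch of a local invocation, using only the two variables documented in the excerpt; the image name and script path are illustrative placeholders, not real resources:

    # Sketch: run a build script inside the named image via the trampoline.
    # TRAMPOLINE_IMAGE and TRAMPOLINE_BUILD_FILE are the documented knobs.
    TRAMPOLINE_IMAGE=gcr.io/my-project/my-test-image \
    TRAMPOLINE_BUILD_FILE=./ci/run_tests.sh \
    ./.kokoro/trampoline_v2.sh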

/external/python/python-api-core/.kokoro/

D | trampoline_v2.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    20  # 1. Prepare the Docker image for the test
    21  # 2. Run the Docker with appropriate flags to run the test
    22  # 3. Upload the newly built Docker image
    29  # gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.j…
    30  # gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
    36  # TRAMPOLINE_IMAGE: The docker image to use.
    41  # (true|false): Whether to upload the Docker image after the
    43  # TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
    44  # TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
        [all …]

/external/python/google-api-python-client/.kokoro/

D | trampoline_v2.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    20  # 1. Prepare the Docker image for the test
    21  # 2. Run the Docker with appropriate flags to run the test
    22  # 3. Upload the newly built Docker image
    29  # gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.j…
    30  # gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
    36  # TRAMPOLINE_IMAGE: The docker image to use.
    41  # (true|false): Whether to upload the Docker image after the
    43  # TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
    44  # TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
        [all …]

/external/angle/third_party/spirv-tools/src/kokoro/scripts/linux/

D | build.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    16  # Linux Build Script.
    19  set -e
    27  BUILD_SHA=${KOKORO_GITHUB_COMMIT:-$KOKORO_GITHUB_PULL_REQUEST_COMMIT}
    30  # Docker creates files with the root user - this can upset the Kokoro artifact copier.
    32  dir=$1
    33  if [[ -d "$dir" ]]; then
    34  sudo chown -R "$(id -u):$(id -g)" "$dir"
    39  # Allow build failures
    41  # "--privileged" is required to run ptrace in the asan builds.
        [all …]

/external/skia/bazel/

D | skia_app_container.bzl
     4  load("@io_bazel_rules_docker//docker/util:run.bzl", "container_run_and_commit")
    17  """Builds a Docker container for a Skia app, and generates a target to push it to GCR.
    20  * "<name>" target to build the Docker container with skia as default user.
    31  # //myapp/BUILD.bazel
    47  repository = "skia-public/myapp",
    51  The above example will produce a Docker container based on gcr.io/skia-public/basealpine with
    54  - /usr/local/bin/myapp/mybinary (mode: 755)
    55  - /usr/local/share/myapp/config.cfg (mode: 644)
    56  - /usr/local/share/myapp/data.json (mode: 644)
    58  To build the container and load it into Docker:
        [all …]
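
The docstring's usage example is truncated right at the "load it into Docker" step. Under the usual rules_docker workflow that step is a `bazel run` of the generated image target; a sketch, assuming the //myapp target name from the excerpt (the exact command in the elided text may differ):

    # Assumption: `bazel run` on the image target builds it and loads it
    # into the local Docker daemon, per standard rules_docker behavior.
    bazel run //myapp:myapp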

/external/skia/infra/bots/task_drivers/push_apps_from_skia_image/

D | push_apps_from_skia_image.go
     3  // Use of this source code is governed by a BSD-style license that can be
     6  // This executable builds the Docker images based off the Skia executables in the
     7  // gcr.io/skia-public/skia-release image. It then issues a PubSub notification to have those apps
    25  docker_pubsub "go.skia.org/infra/go/docker/build/pubsub"
    30  "go.skia.org/infra/task_driver/go/lib/docker"
    50  … "If provided, dump a JSON blob of step data to the given file. Prints to stdout if '-' is given.")
    58  func buildPushFiddlerImage(ctx context.Context, dkr *docker.Docker, tag, infraCheckoutDir string, t…
    59  // Run skia-release image and extract products out of /tmp/skia/skia. See
    60  …// https://skia.googlesource.com/skia/+/0e845dc8b05cb2d40d1c880184e33dd76081283a/docker/skia-relea…
    68  …skiaCopyCmd := []string{"/bin/sh", "-c", "cd /tmp; tar cvzf skia.tar.gz --directory=/tmp/skia skia…
        [all …]

/external/swiftshader/third_party/SPIRV-Tools/kokoro/scripts/linux/

D | build.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    16  # Linux Build Script.
    19  set -e
    27  BUILD_SHA=${KOKORO_GITHUB_COMMIT:-$KOKORO_GITHUB_PULL_REQUEST_COMMIT}
    30  # Docker creates files with the root user - this can upset the Kokoro artifact copier.
    32  dir=$1
    33  if [[ -d "$dir" ]]; then
    34  sudo chown -R "$(id -u):$(id -g)" "$dir"
    39  # Allow build failures
    41  # "--privileged" is required to run ptrace in the asan builds.
        [all …]

/external/deqp-deps/SPIRV-Tools/kokoro/scripts/linux/

D | build.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    16  # Linux Build Script.
    19  set -e
    27  BUILD_SHA=${KOKORO_GITHUB_COMMIT:-$KOKORO_GITHUB_PULL_REQUEST_COMMIT}
    30  # Docker creates files with the root user - this can upset the Kokoro artifact copier.
    32  dir=$1
    33  if [[ -d "$dir" ]]; then
    34  sudo chown -R "$(id -u):$(id -g)" "$dir"
    39  # Allow build failures
    41  # "--privileged" is required to run ptrace in the asan builds.
        [all …]

/external/rust/android-crates-io/crates/grpcio-sys/grpc/tools/run_tests/artifacts/

D | build_artifact_python.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    16  set -ex
    21  export PYTHON=${PYTHON:-python}
    22  export AUDITWHEEL=${AUDITWHEEL:-auditwheel}
    28  # Needed for building binary distribution wheels -- bdist_wheel
    29  "${PYTHON}" -m pip install --upgrade wheel
    33  # Install Cython to avoid source wheel build failure.
    34  # This only needs to be done when not running under docker (=on MacOS)
    35  # since the docker images used for building python wheels
    36  # already have a new-enough version of cython pre-installed.
        [all …]

/external/grpc-grpc/tools/run_tests/artifacts/

D | build_artifact_python.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    16  set -ex
    21  export PYTHON=${PYTHON:-python}
    22  export AUDITWHEEL=${AUDITWHEEL:-auditwheel}
    28  # Needed for building binary distribution wheels -- bdist_wheel
    29  "${PYTHON}" -m pip install --upgrade pip wheel setuptools
    33  # Install Cython to avoid source wheel build failure.
    34  # This only needs to be done when not running under docker (=on MacOS)
    35  # since the docker images used for building python wheels
    36  # already have a new-enough version of cython pre-installed.
        [all …]

/external/tensorflow/tensorflow/lite/tools/pip_package/

D | Makefile
     7  # http://www.apache.org/licenses/LICENSE-2.0
    21  # Values: according to https://www.python.org/dev/peps/pep-0440/
    24  MAKEFILE_DIR := $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
    26  TAG_IMAGE := "tflite-runtime-builder-$(subst :,-,$(BASE_IMAGE))"
    28  DOCKER_PARAMS := --pid=host \
    29      --env "CI_BUILD_USER=$(shell id -u -n)" \
    30      --env "CI_BUILD_UID=$(shell id -u)" \
    31      --env "CI_BUILD_GROUP=$(shell id -g -n)" \
    32      --env "CI_BUILD_GID=$(shell id -g)" \
    33      --env "CI_BUILD_HOME=$(TENSORFLOW_DIR)/bazel-ci_build-cache" \
        [all …]

/external/cldr/tools/scripts/web/docker/

D | README.md
     1  # Docker for CLDR Site
     5  1. run `npm i` and `npm run build` in `docs/site`
     9  1. install https://docker.io
    10  2. `docker compose up`
    12  4. hit control-C to cancel the docker run.
    16  1. `docker compose run -w /src site jekyll build`
    17  2. output is in `./_site` here in this dir.
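
Condensed into one shell session, the README's steps look roughly like this (directory paths are inferred from the repository layout in the heading above; treat it as a sketch):

    # 1. Build the site content with npm, per the first step quoted above.
    cd docs/site && npm i && npm run build && cd -
    # 2. Serve it locally via the compose setup in this directory (Ctrl-C stops it).
    cd tools/scripts/web/docker && docker compose up
    # Alternative one-off static build; output lands in ./_site.
    docker compose run -w /src site jekyll build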

/external/executorch/.github/workflows/

D | trunk.yml
     6  - main
     7  - release/*
     9  - ciflow/trunk/*
    12  - .ci/docker/ci_commit_pins/pytorch.txt
    13  - .ci/scripts/**
    17  …p: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_n…
    18  cancel-in-progress: true
    21  gather-models:
    22  runs-on: ubuntu-22.04
    24  models: ${{ steps.gather-models.outputs.models }}
        [all …]

/external/skia/infra/lottiecap/

D | Makefile
     1  gold-docker-image: aggregator
     2  # Set the build context to the current work dir, so we can copy
     4  docker build -t gold-lottie-web-puppeteer -f ./docker/gold-lottie-web-puppeteer/Dockerfile .
     7  mkdir -p ./tmp
     8  CGO_ENABLED=0 GOOS=linux go build -o ./tmp/gold-aggregator -a ./gold/

/external/google-cloud-java/java-cloudbuild/proto-google-cloud-build-v1/src/main/java/com/google/cloudbuild/v1/

D | BuildStepOrBuilder.java
     8  * https://www.apache.org/licenses/LICENSE-2.0
    31  * build step.
    32  * If the image is available in the host's Docker daemon's cache, it
    35  * The Docker daemon's cache will already have the latest versions of all of
    36  * the officially supported build steps
    37  * ([https://github.com/GoogleCloudPlatform/cloud-builders](https://github.com/GoogleCloudPlatform/…
    38  * The Docker daemon will also have cached many of the layers for some popular
    41  * If you built an image in a previous build step, it will be stored in the
    42  * host's Docker daemon's cache and is available to use as the name for a
    43  * later build step.
        [all …]

/external/coreboot/util/docker/coreboot-jenkins-node/

D | Dockerfile
     1  # This dockerfile is not meant to be used directly by docker. The
     3  # the docker image for this file by running:
     5  # make coreboot-jenkins-node
    12  # SSH_KEY is the contents of the file coreboot-jenkins-node/authorized_keys
    14  # docker build command, the 'COPY' keyword isn't valid.
    16  FROM coreboot/coreboot-sdk:{{SDK_VERSION}}
    19  RUN apt-get -y update && \
    20      apt-get -y install \
    21      default-jre-headless \
    22      libcmocka-dev \
        [all …]
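
Per the header comment, this Dockerfile is consumed through the Makefile in util/docker/ (shown earlier in these results), which substitutes {{SDK_VERSION}} before invoking docker. A sketch of that documented entry point; the cache-directory override simply restates the Makefile's own default:

    # Run from the coreboot tree; the variable override is optional,
    # per the ?= defaults in util/docker/Makefile.
    make -C util/docker coreboot-jenkins-node \
        COREBOOT_JENKINS_CACHE_DIR=/srv/docker/coreboot-builder/cache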

/external/libcxx/utils/docker/

D | build_docker_image.sh
     2  #===- libcxx/utils/docker/build_docker_image.sh ----------------------------===//
     9  #===----------------------------------------------------------------------===//
    10  set -e
    18  Usage: build_docker_image.sh [options] [-- [cmake_args]...]
    22  -h|--help                show this help message
    23  Docker-specific:
    24  -s|--source              image source dir (i.e. debian8, nvidia-cuda, etc)
    25  -d|--docker-repository   docker repository for the image
    26  -t|--docker-tag          docker tag for the image
    28  Required options: --source and --docker-repository.
        [all …]
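
Putting the usage text together, a representative invocation looks like the following; the repository, tag, and trailing cmake arguments are illustrative values, not defaults from the script:

    # --source and --docker-repository are required per the usage text above.
    # Everything after `--` is passed through as cmake arguments.
    ./build_docker_image.sh \
        --source debian8 \
        --docker-repository my-docker-repo \
        --docker-tag latest \
        -- -DCMAKE_BUILD_TYPE=Release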

/external/skia/infra/bots/assets/mesa_intel_driver_linux/

D | README.md
     4  Using the automated asset python scripts requires that Docker be installed.
    10  Using Docker
    11  ------------
    12  It is easiest to just use the pre-built docker image.
    14  …docker run -v /tmp/out:/OUT -e MESA_VERSION=18.1.7 gcr.io/skia-public/mesa-driver-builder:latest /…
    22  --------------------
    23  If Docker is not installed, these steps may be used to build the driver.
    24  This is known to work on Ubuntu 18.04, but is stale since we use the Docker container
    25  for day-to-day builds.
    29  …-get install autoconf libtool scons flex bison llvm-dev libpthread-stubs0-dev x11proto-gl-dev libd…
        [all …]

/external/tensorflow/tensorflow/tools/pip_package/xla_build/pip_test/

D | run_xla_aot_test.sh
     8  # http://www.apache.org/licenses/LICENSE-2.0
    20  # it *can* be run outside docker/kokoro, on a dev machine, as long as cmake and
    21  # ninja-build packages are installed, and the tensorflow PIP package (the one
    26  # under kokoro, this is run by learning/brain/testing/kokoro/rel/docker/aot_compile.sh
    28  set -euo pipefail -o history
    33  --out_dir=/tmp/saved_models
    44  rm -rf "${GEN_ROOT}" "${PROJECT}" "${TF_THIRD_PARTY}"
    46  # We don't want to -Itensorflow, to avoid unwanted dependencies.
    48  mkdir -p "${TF_THIRD_PARTY}"
    49  cp -rf third_party "${TF_THIRD_PARTY}/"
        [all …]