
Searched full:"--workflow-run-attempt" (Results 1 – 25 of 114) sorted by relevance


/external/pytorch/.github/workflows/
upload-test-stats.yml
5 …trunk, periodic, inductor, unstable, slow, unstable-periodic, inductor-periodic, rocm, inductor-mi…
7 - completed
11 …ion adapted from https://github.com/community/community/discussions/21090#discussioncomment-3226271
14 runs-on: ubuntu-latest
18 - name: Get workflow run conclusion
19 uses: octokit/request-action@v2.1.0
26 upload-test-stats:
32 runs-on: ubuntu-22.04
33 environment: upload-stats
34 …name: Upload test stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow…
[all …]
upload-torch-dynamo-perf-stats.yml
5 …workflows: [inductor-A100-perf-nightly, inductor-perf-nightly-A10g, inductor-perf-nightly-aarch64,…
7 - completed
10 get-conclusion:
11 runs-on: ubuntu-latest
13 conclusion: ${{ fromJson(steps.get-conclusion.outputs.data).conclusion }}
15 - name: Get workflow run conclusion
16 uses: octokit/request-action@v2.1.0
17 id: get-conclusion
23 upload-perf-stats:
24 needs: get-conclusion
[all …]
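
Both upload workflows above gate on the outcome of the triggering run: a get-conclusion job queries the GitHub API through octokit/request-action, and the upload job reads the conclusion from its output. A minimal Python sketch of the same lookup, assuming a GITHUB_TOKEN environment variable with read access to Actions runs (the helper name is illustrative):

    # Fetch a workflow run's conclusion, as the get-conclusion jobs above do
    # via octokit/request-action. repo is "owner/name", e.g. "pytorch/pytorch".
    import os
    import requests

    def get_run_conclusion(repo: str, run_id: int) -> "str | None":
        resp = requests.get(
            f"https://api.github.com/repos/{repo}/actions/runs/{run_id}",
            headers={"Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}"},
        )
        resp.raise_for_status()
        # "conclusion" is null while the run is still in progress.
        return resp.json().get("conclusion")
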
upload_test_stats_intermediate.yml
7 description: workflow_id of the run
10 description: workflow_run_attempt of the run
16 runs-on: ubuntu-22.04
17 environment: upload-stats
19 - name: Checkout PyTorch
20 uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.4
22 fetch-depth: 1
25 - uses: actions/setup-python@v4
27 python-version: '3.11'
30 - run: |
[all …]
/external/pytorch/.github/actions/upload-test-artifacts/
action.yml
6 use-gha:
9 file-suffix:
12 workflow job id, see [Job id in artifacts].
14 s3-bucket:
17 default: "gha-artifacts"
23 - name: Zip JSONs for upload
24 if: runner.os != 'Windows' && !inputs.use-gha
27 FILE_SUFFIX: ${{ inputs.file-suffix }}
28 run: |
30 rm -f test-jsons-*.zip
[all …]
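
The zip steps in this action bundle test outputs under the run-attempt naming convention (NAME-runattempt<N>-SUFFIX.zip, documented in upload_artifacts.py below). A rough Python equivalent of the "Zip JSONs for upload" step, with the glob pattern and file layout assumed for illustration:

    # Bundle JSON outputs into an artifact named with the run attempt and the
    # action's file-suffix input. Paths are illustrative, not from the source.
    import glob
    import os
    import zipfile

    suffix = os.environ["FILE_SUFFIX"]                   # from inputs.file-suffix
    attempt = os.environ.get("GITHUB_RUN_ATTEMPT", "1")  # set by GitHub Actions
    with zipfile.ZipFile(f"test-jsons-runattempt{attempt}-{suffix}.zip", "w") as zf:
        for path in glob.glob("test/**/*.json", recursive=True):
            zf.write(path)
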
/external/pytorch/tools/stats/
upload_test_stats_intermediate.py
11 "--workflow-run-id",
13 help="id of the workflow to get artifacts from",
16 "--workflow-run-attempt",
19 help="which retry of the workflow this is",
23 print(f"Workflow id is: {args.workflow_run_id}")
upload_artifacts.py
10 "sccache-stats",
11 "test-jsons",
12 "test-reports",
13 "usage-log",
15 BUCKET_NAME = "gha-artifacts"
16 FILENAME_REGEX = r"-runattempt\d+"
19 def get_artifacts(repo: str, workflow_run_id: int, workflow_run_attempt: int) -> None:
30 … # GHA artifact is named as follows: NAME-runattempt${{ github.run_attempt }}-SUFFIX.zip
32 # pytorch/pytorch/WORKFLOW_ID/RUN_ATTEMPT/artifact/NAME-SUFFIX.zip
44 "--workflow-run-id",
[all …]
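
The comments above pin down the rename this script performs: GHA names an artifact NAME-runattempt${{ github.run_attempt }}-SUFFIX.zip, and the uploader strips the attempt marker so the S3 key carries the attempt in its path instead. A sketch built on the FILENAME_REGEX shown in the snippet; the helper itself is an illustration, not the source function:

    # Strip the "-runattempt<N>" marker from a GHA artifact name and build the
    # S3 key described in the comment above.
    import re

    FILENAME_REGEX = r"-runattempt\d+"

    def s3_key(repo: str, workflow_run_id: int, workflow_run_attempt: int,
               artifact_name: str) -> str:
        clean_name = re.sub(FILENAME_REGEX, "", artifact_name)
        return f"{repo}/{workflow_run_id}/{workflow_run_attempt}/artifact/{clean_name}"

    # s3_key("pytorch/pytorch", 123, 1, "test-jsons-runattempt1-suffix.zip")
    #   -> "pytorch/pytorch/123/1/artifact/test-jsons-suffix.zip"
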
upload_sccache_stats.py
18 ) -> list[dict[str, Any]]:
24 download_s3_artifacts("sccache-stats", workflow_run_id, workflow_run_attempt)
36 "--workflow-run-id",
39 help="id of the workflow to get artifacts from",
42 "--workflow-run-attempt",
45 help="which retry of the workflow this is",
upload_stats_lib.py
27 def _get_request_headers() -> dict[str, str]:
34 def _get_artifact_urls(prefix: str, workflow_run_id: int) -> dict[Path, str]:
35 """Get all workflow artifacts with 'test-report' in the name."""
56 ) -> Path:
57 # [Artifact run attempt]
58 # All artifacts on a workflow share a single namespace. However, we can
59 # re-run a workflow and produce a new set of artifacts. To avoid name
60 # collisions, we add `-runattempt1<run #>-` somewhere in the artifact name.
62 # This code parses out the run attempt number from the artifact name. If it
64 atoms = str(artifact_name).split("-")
[all …]
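
The [Artifact run attempt] note describes the inverse operation: recover the attempt number from an artifact name so artifacts produced by other attempts can be skipped. A minimal sketch of that parse, assuming the marker is a "runattempt<N>" atom between hyphens (the function name is illustrative):

    # Pull the run-attempt number back out of an artifact name split on "-".
    # Returns None when no marker is present.
    def parse_run_attempt(artifact_name: str) -> "int | None":
        for atom in str(artifact_name).split("-"):
            if atom.startswith("runattempt"):
                return int(atom[len("runattempt"):])
        return None
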
upload_dynamo_perf_stats.py
22 "test-reports",
25 r"test-reports-test-(?P<name>[\w\-]+)-\d+-\d+-(?P<runner>[\w\.-]+)_(?P<job>\d+).zip"
35 ) -> list[dict[str, Any]]:
73 "workflow_id": workflow_run_id, # type: ignore[dict-item]
74 "run_attempt": workflow_run_attempt, # type: ignore[dict-item]
90 def generate_partition_key(repo: str, doc: Dict[str, Any]) -> str:
99 hash_content = hashlib.md5(json.dumps(doc).encode("utf-8")).hexdigest()
108 "--workflow-run-id",
111 help="id of the workflow to get perf stats from",
114 "--workflow-run-attempt",
[all …]
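
generate_partition_key derives a stable key by MD5-hashing the JSON-serialized document. The digest line below is taken from the snippet; joining it with the repo, workflow id, and run attempt is an assumption about the key layout, for illustration only:

    # Content-hashed partition key in the spirit of generate_partition_key.
    # The f-string layout is assumed, not taken from the source.
    import hashlib
    import json
    from typing import Any

    def generate_partition_key(repo: str, doc: dict[str, Any]) -> str:
        hash_content = hashlib.md5(json.dumps(doc).encode("utf-8")).hexdigest()
        return f"{repo}/{doc['workflow_id']}/{doc['run_attempt']}/{hash_content}"
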
check_disabled_tests.py
26 ) -> dict[str, dict[str, int]]:
28 Return a list of disabled tests that should be re-enabled and those that are still
35 # * Success test should be re-enable if it's green after rerunning in all platforms
37 # * Failures from pytest because pytest-flakefinder is used to run the same test
47 # Under --rerun-disabled-tests mode, a test is skipped when:
58 # ignore this case as returning a list of subskips only happens when tests are run
82 # Under --rerun-disabled-tests mode, if a test is not skipped or failed, it's
103 ) -> Generator[Path, None, None]:
106 test reports are from rerun_disabled_tests workflow because the name doesn't include the
114 "test-reports", workflow_run_id, workflow_run_attempt
[all …]
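
The docstring and comments sketch the decision rule: under --rerun-disabled-tests, a disabled test is proposed for re-enabling only when it comes back green after rerunning on every platform. A toy version of that check; the per-platform counter layout is assumed rather than taken from the source:

    # Re-enable a disabled test only if every platform saw green reruns and
    # no red ones. per_platform maps platform -> {"num_green": N, "num_red": N}
    # (an assumed shape).
    def should_reenable(per_platform: dict[str, dict[str, int]]) -> bool:
        return bool(per_platform) and all(
            counts.get("num_red", 0) == 0 and counts.get("num_green", 0) > 0
            for counts in per_platform.values()
        )
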
upload_test_stats.py
26 ) -> list[dict[str, Any]]:
27 """Convert a test report xml file into a JSON-serializable list of test cases."""
58 def process_xml_element(element: ET.Element) -> dict[str, Any]:
59 """Convert a test suite element into a JSON-serializable dict."""
115 def get_tests(workflow_run_id: int, workflow_run_attempt: int) -> list[dict[str, Any]]:
122 "test-report", workflow_run_id, workflow_run_attempt
151 ) -> list[dict[str, Any]]:
154 for xml_report in Path(".").glob("**/test/test-reports/**/*.xml"):
164 def summarize_test_cases(test_cases: list[dict[str, Any]]) -> list[dict[str, Any]]:
166 manually instead of using the `test-suite` XML tag because xmlrunner does
[all …]
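
This uploader walks the test-reports XML files and converts each element into a JSON-serializable dict (process_xml_element). A simplified sketch of such a conversion; the real function also handles text content and repeated child tags:

    # Recursively turn a JUnit-style XML element into a plain dict:
    # attributes become keys, child elements recurse. Simplified.
    import xml.etree.ElementTree as ET
    from typing import Any

    def process_xml_element(element: ET.Element) -> dict[str, Any]:
        ret: dict[str, Any] = {**element.attrib}
        for child in element:
            ret[child.tag] = process_xml_element(child)
        return ret

    # cases = [process_xml_element(tc)
    #          for tc in ET.parse("report.xml").getroot().iter("testcase")]
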
upload_metrics.py
28 "arn:aws:dynamodb:us-east-1:308535385114:table/torchci-metrics"
45 ) -> None:
51 def value(self) -> Any:
73 def add_global_metric(metric_name: str, metric_value: Any) -> None:
85 ) -> None:
89 Even if EMIT_METRICS is set to False, this function will still run the code to
95 and be emitted just once per run attempt.
110 # We use these env vars that to determine basic info about the workflow run.
115 EnvVarMetric("workflow", "GITHUB_WORKFLOW"),
128 calling_frame = inspect.currentframe().f_back # type: ignore[union-attr]
[all …]
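
EnvVarMetric stamps every emitted metric with workflow context resolved from GITHUB_* environment variables. A stripped-down sketch of the idea; the class shape and any field beyond "workflow" are assumptions:

    # Resolve each metric from an environment variable at emit time.
    import os
    from dataclasses import dataclass
    from typing import Any

    @dataclass
    class EnvVarMetric:
        name: str
        env_var: str

        def value(self) -> Any:
            return os.environ.get(self.env_var)

    metrics = [
        EnvVarMetric("workflow", "GITHUB_WORKFLOW"),       # from the snippet
        EnvVarMetric("run_attempt", "GITHUB_RUN_ATTEMPT"), # assumed companion
    ]
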
/external/pytorch/torch/_logging/
scribe.py
6 from fbscribelogger import make_scribe_logger # type: ignore[import-untyped]
12 def make_scribe_logger(name: str, thrift_src: str) -> Callable[..., None]:
13 def inner(**kwargs: TLazyField) -> None:
24 …# The commit SHA that triggered the workflow, e.g., 02a6b1d30f338206a71d0b75bfa09d85fac0028a. Deri…
30 …# The fully-formed ref of the branch or tag that triggered the workflow run, e.g., refs/pull/13389…
33 …protections or rulesets are configured for the ref that triggered the workflow run. Derived from G…
36 …# A unique number for each attempt of a particular workflow run in a repository, e.g., 1. Derived …
39 …# A unique number for each workflow run within a repository, e.g., 19471190684. Derived from GITHU…
42 …# A unique number for each run of a particular workflow in a repository, e.g., 238742. Derived fro…
45 …# The name of the current job. Derived from JOB_NAME, e.g., linux-jammy-py3.8-gcc11 / test (defaul…
/external/googleapis/google/cloud/integrations/v1alpha/
log_entries.proto
7 // http://www.apache.org/licenses/LICENSE-2.0
68 // Errors, warnings, and informationals associated with the workflow/task.
69 // The order in which the errors were added by the workflow/task is
79 // Auto-generated primary key.
147 // Status for the current execution attempt.
169 // the integration execution attempt number this snapshot belongs to.
172 // the task attempt number this snapshot belongs to. Could be empty.
178 // Ancestor task number for the task(it will only be non-empty if the task
179 // is under 'private workflow')
182 // Ancestor iteration number for the task(it will only be non-empty if the
[all …]
/external/googleapis/google/dataflow/v1beta3/
environment.proto
7 // http://www.apache.org/licenses/LICENSE-2.0
34 // storage. The system will append the suffix "/temp-{JOBNAME} to
49 // unspecified, the service will attempt to choose a reasonable
79 // are required in order to run the job.
82 // The dataset for the current project where various workflow
100 // Identity to run virtual machines as. Defaults to the default account.
103 // Which Flexible Resource Scheduling mode to run in.
107 // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
108 // which worker processing should occur, e.g. "us-west1". Mutually exclusive
114 // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
[all …]
/external/google-cloud-java/java-dataflow/proto-google-cloud-dataflow-v1beta3/src/main/proto/google/dataflow/v1beta3/
environment.proto
7 // http://www.apache.org/licenses/LICENSE-2.0
34 // storage. The system will append the suffix "/temp-{JOBNAME} to
49 // unspecified, the service will attempt to choose a reasonable
79 // are required in order to run the job.
82 // The dataset for the current project where various workflow
100 // Identity to run virtual machines as. Defaults to the default account.
103 // Which Flexible Resource Scheduling mode to run in.
107 // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
108 // which worker processing should occur, e.g. "us-west1". Mutually exclusive
114 // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
[all …]
/external/pytorch/benchmarks/dynamo/pr_time_benchmarks/
benchmark_base.py
16 …# The commit SHA that triggered the workflow, e.g., 02a6b1d30f338206a71d0b75bfa09d85fac0028a. Deri…
27 …# A unique number for each workflow run within a repository, e.g., 19471190684. Derived from GITHU…
30 …# A unique number for each attempt of a particular workflow run in a repository, e.g., 1. Derived …
33 …protections or rulesets are configured for the ref that triggered the workflow run. Derived from G…
36 …# The fully-formed ref of the branch or tag that triggered the workflow run, e.g., refs/pull/13389…
42 …# The name of the current job. Derived from JOB_NAME, e.g., linux-jammy-py3.8-gcc11 / test (defaul…
48 …# A unique number for each run of a particular workflow in a repository, e.g., 238742. Derived fro…
/external/google-cloud-java/java-dataflow/proto-google-cloud-dataflow-v1beta3/src/main/java/com/google/dataflow/v1beta3/
WorkerPoolOrBuilder.java
8 * https://www.apache.org/licenses/LICENSE-2.0
59 * attempt to choose a reasonable default.
152 * Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
153 * service will attempt to choose a reasonable default.
165 * Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
166 * service will attempt to choose a reasonable default.
187 * continue to run and use Google Compute Engine VM resources in the
191 * If unknown or unspecified, the service will attempt to choose a reasonable
212 * continue to run and use Google Compute Engine VM resources in the
216 * If unknown or unspecified, the service will attempt to choose a reasonable
[all …]
/external/executorch/.github/scripts/
extract_benchmark_results.py
5 # This source code is licensed under the BSD-style license found in the
24 ARTIFACTS_FILENAME_REGEX = re.compile(r"(android|ios)-artifacts-(?P<job_id>\d+).json")
26 # iOS-related regexes and variables
28 …r"Test Case\s+'-\[(?P<test_class>\w+)\s+(?P<test_name>[\w\+]+)\]'\s+measured\s+\[(?P<metric>.+)\]\…
46 ) -> None:
61 ) -> None:
69 def parse_args() -> Any:
74 "--artifacts",
81 "--output-dir",
88 "--repo",
[all …]
/external/executorch/.github/workflows/
android-perf.yml
1 name: android-perf
5 - cron: 0 0 * * *
15 description: Target devices to run benchmark
25 description: Run with threadpool?
45 description: Target devices to run benchmark
55 description: Run with threadpool?
69 …workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'bra…
70 cancel-in-progress: true
73 set-parameters:
74 runs-on: linux.2xlarge
[all …]
/external/bazelbuild-rules_rust/crate_universe/
DEVELOPMENT.md
5 Crate Universe repository rules are backed by a binary called `cargo-bazel`.
10 cargo build --bin=cargo-bazel
17 export CARGO_BAZEL_GENERATOR_URL=file://$(pwd)/target/debug/cargo-bazel
20 From here on, the repository rule can be run
22 ## Using non-release rules_rust
25 releases page (e.g. using an archive from a commit or branch) then `cargo-bazel`
27 It's highly recommended to build `cargo-bazel` binaries yourself and host them
29 attempt to build the binary using [cargo_bootstrap_repository][cbr] as a fallback.
30 This is very time consuming and in no way the recommended workflow for anything
38 re-vendor them all, a bash script is provided:
[all …]
/external/googleapis/google/cloud/dataform/v1beta1/
dataform.proto
7 // http://www.apache.org/licenses/LICENSE-2.0
46 "https://www.googleapis.com/auth/cloud-platform";
562 // `schema_suffix` and `table_prefix` can have a special expression -
568 // when creating workspace-scoped compilation results.
584 // Optional. The repository's user-friendly name.
604 // creating workspace-scoped compilation results. See documentation for
615 …// https://cloud.google.com/dataform/reference/rest#rest-resource:-v1beta1.projects.locations.repo…
621 // Optional. The service account to run workflow invocations under.
1418 // A record of an attempt to create a compilation result for this release
1421 // The timestamp of this release attempt.
[all …]
/external/python/google-api-python-client/docs/dyn/
dataflow_v1b3.projects.jobs.html
8 font-weight: inherit;
9 font-style: inherit;
10 font-size: 100%;
11 font-family: inherit;
12 vertical-align: baseline;
16 font-size: 13px;
21 font-size: 26px;
22 margin-bottom: 1em;
26 font-size: 24px;
27 margin-bottom: 1em;
[all …]
dataflow_v1b3.projects.locations.jobs.html
8 font-weight: inherit;
9 font-style: inherit;
10 font-size: 100%;
11 font-family: inherit;
12 vertical-align: baseline;
16 font-size: 13px;
21 font-size: 26px;
22 margin-bottom: 1em;
26 font-size: 24px;
27 margin-bottom: 1em;
[all …]
/external/aws-crt-java/.github/workflows/
run_android_ci.py
7 import requests # - for uploading files
10 parser = argparse.ArgumentParser(description="Utility script to upload and run Android Device tests…
11 parser.add_argument('--run_id', required=True, help="A unique number for each workflow run within a…
12 parser.add_argument('--run_attempt', required=True, help="A unique number for each attempt of a par…
13 parser.add_argument('--project_arn', required=True, help="Arn for the Device Farm Project the apk w…
14 parser.add_argument('--device_pool_arn', required=True, help="Arn for device pool of the Device Far…
17 …= current_working_directory + '/src/test/android/testapp/build/outputs/apk/debug/testapp-debug.apk'
18 …ory + '/src/test/android/testapp/build/outputs/apk/androidTest/debug/testapp-debug-androidTest.apk'
36 print("Error - could not make Boto3 client. Credentials likely could not be sourced")
37 sys.exit(-1)
[all …]
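
The script keys each Device Farm upload to a specific workflow run and attempt. A condensed reconstruction of the argument parsing shown above, with help strings abridged:

    # Minimal mirror of the flags visible in this result.
    import argparse

    parser = argparse.ArgumentParser(
        description="Utility script to upload and run Android Device tests")
    parser.add_argument("--run_id", required=True,
                        help="Unique number for each workflow run in a repository")
    parser.add_argument("--run_attempt", required=True,
                        help="Unique number for each attempt of a workflow run")
    parser.add_argument("--project_arn", required=True,
                        help="ARN of the Device Farm project")
    parser.add_argument("--device_pool_arn", required=True,
                        help="ARN of the Device Farm device pool")
    args = parser.parse_args()
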
