
Full-text search for "shard" (results 1 – 25 of 1,154), sorted by relevance


/external/pytorch/tools/test/
test_test_selections.py
85 test="super_long_test", shard=1, num_shards=1, time=None
87 ShardedTest(test="long_test2", shard=1, num_shards=1, time=None),
88 ShardedTest(test="normal_test2", shard=1, num_shards=1, time=None),
89 ShardedTest(test="short_test1", shard=1, num_shards=1, time=None),
90 ShardedTest(test="short_test3", shard=1, num_shards=1, time=None),
91 ShardedTest(test="short_test5", shard=1, num_shards=1, time=None),
97 ShardedTest(test="long_test1", shard=1, num_shards=1, time=None),
98 ShardedTest(test="normal_test1", shard=1, num_shards=1, time=None),
99 ShardedTest(test="normal_test3", shard=1, num_shards=1, time=None),
100 ShardedTest(test="short_test2", shard=1, num_shards=1, time=None),
[all …]
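
The ShardedTest entries above pair each test name with a shard index, a shard count, and an optional recorded runtime. A common way to use such timing data is a greedy assignment that always places the next-longest test on the currently lightest shard; the sketch below illustrates that idea only (the function name `shard_tests` and the algorithm are hypothetical, not PyTorch's actual implementation in tools/testing).

```python
import heapq
from dataclasses import dataclass
from typing import Optional

@dataclass
class ShardedTest:
    test: str
    shard: int                 # 1-based index of the shard this test landed on
    num_shards: int
    time: Optional[float]      # recorded duration in seconds, if known

def shard_tests(tests: dict, num_shards: int):
    """Greedily assign tests to the shard with the smallest accumulated time."""
    heap = [(0.0, i) for i in range(num_shards)]        # (total_time, shard_index)
    shards = [[] for _ in range(num_shards)]
    # Place the longest tests first so large tests do not pile onto one shard.
    for name, t in sorted(tests.items(), key=lambda kv: kv[1] or 0.0, reverse=True):
        total, idx = heapq.heappop(heap)
        shards[idx].append(ShardedTest(name, idx + 1, num_shards, t))
        heapq.heappush(heap, (total + (t or 0.0), idx))
    return shards

print(shard_tests({"long_test1": 60.0, "short_test1": 1.0, "short_test2": 2.0}, 2))
```
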
/external/pytorch/torch/distributed/tensor/_ops/
_matrix_ops.py
26 from torch.distributed.tensor.placement_types import Placement, Replicate, Shard
40 # follow the input spec but transpose the Shard placements
42 Shard(1 - p.dim) if isinstance(p, Shard) else p
198 # shard on the num of head dim
199 qkv_sharding = Shard(1) # num head dim
200 output_sharding = Shard(1) # num head dim
201 logsumexp_sharding = Shard(1) # num head dim
203 debug_attn_mask_sharding: Placement = Shard(1) # num head dim
227 Shard(2), # output
228 Shard(2), # logsumexp
[all …]
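
This result transposes Shard placements for a matrix transpose (line 42 above: `Shard(1 - p.dim) if isinstance(p, Shard) else p`) and shards the attention tensors on the num-head dimension. A minimal, self-contained illustration of that placement mapping (placement objects only; no device mesh or process group is needed to construct them):

```python
# Requires a PyTorch build with torch.distributed available.
from torch.distributed.tensor.placement_types import Placement, Replicate, Shard

def transpose_placements(placements):
    """Mirror the rule on line 42 above: after a 2-D transpose, a tensor sharded
    on dim 0 becomes sharded on dim 1 (and vice versa); Replicate is unchanged."""
    return [Shard(1 - p.dim) if isinstance(p, Shard) else p for p in placements]

print(transpose_placements([Shard(0), Replicate()]))  # sharding moves from dim 0 to dim 1
```
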
/external/rust/android-crates-io/crates/grpcio-sys/grpc/src/core/lib/iomgr/
timer_generic.cc
54 // A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
68 // The deadline of the next timer due in this shard.
81 // is hashed to select the timer shard to add the timer to
85 // the deadline of the next timer in each shard).
181 // If a timer is added to a timer shard (either heap or a list), it must
183 // timer shard.
235 static grpc_core::Timestamp compute_min_deadline(timer_shard* shard) { in compute_min_deadline() argument
236 return grpc_timer_heap_is_empty(&shard->heap) in compute_min_deadline()
237 ? shard->queue_deadline_cap + grpc_core::Duration::Epsilon() in compute_min_deadline()
239 grpc_timer_heap_top(&shard->heap)->deadline); in compute_min_deadline()
[all …]
/external/grpc-grpc/src/core/lib/iomgr/
timer_generic.cc
54 // A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
68 // The deadline of the next timer due in this shard.
81 // is hashed to select the timer shard to add the timer to
85 // the deadline of the next timer in each shard).
181 // If a timer is added to a timer shard (either heap or a list), it must
183 // timer shard.
235 static grpc_core::Timestamp compute_min_deadline(timer_shard* shard) { in compute_min_deadline() argument
236 return grpc_timer_heap_is_empty(&shard->heap) in compute_min_deadline()
237 ? shard->queue_deadline_cap + grpc_core::Duration::Epsilon() in compute_min_deadline()
239 grpc_timer_heap_top(&shard->heap)->deadline); in compute_min_deadline()
[all …]
/external/grpc-grpc/src/core/lib/event_engine/posix_engine/
timer.cc
42 grpc_core::Timestamp TimerList::Shard::ComputeMinDeadline() { in ComputeMinDeadline()
49 TimerList::Shard::Shard() : stats(1.0 / kAddDeadlineScale, 0.1, 0.5) {} in Shard() function in grpc_event_engine::experimental::TimerList::Shard
55 shards_(new Shard[num_shards_]), in TimerList()
56 shard_queue_(new Shard*[num_shards_]) { in TimerList()
58 Shard& shard = shards_[i]; in TimerList() local
59 shard.queue_deadline_cap = in TimerList()
62 shard.shard_queue_index = i; in TimerList()
63 shard.list.next = shard.list.prev = &shard.list; in TimerList()
64 shard.min_deadline = shard.ComputeMinDeadline(); in TimerList()
65 shard_queue_[i] = &shard; in TimerList()
[all …]
/external/rust/android-crates-io/crates/grpcio-sys/grpc/src/core/lib/event_engine/posix_engine/
timer.cc
42 grpc_core::Timestamp TimerList::Shard::ComputeMinDeadline() { in ComputeMinDeadline()
49 TimerList::Shard::Shard() : stats(1.0 / kAddDeadlineScale, 0.1, 0.5) {} in Shard() function in grpc_event_engine::experimental::TimerList::Shard
55 shards_(new Shard[num_shards_]), in TimerList()
56 shard_queue_(new Shard*[num_shards_]) { in TimerList()
58 Shard& shard = shards_[i]; in TimerList() local
59 shard.queue_deadline_cap = in TimerList()
62 shard.shard_queue_index = i; in TimerList()
63 shard.list.next = shard.list.prev = &shard.list; in TimerList()
64 shard.min_deadline = shard.ComputeMinDeadline(); in TimerList()
65 shard_queue_[i] = &shard; in TimerList()
[all …]
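
The gRPC timer code above (timer_generic.cc and the event-engine timer.cc) keeps an array of timer shards, each holding its own heap of pending timers; a timer's deadline is hashed to pick a shard, each shard caches the deadline of its next timer, and the global next deadline is the minimum over shards. A simplified Python sketch of that structure, illustrative only and not the gRPC implementation:

```python
import heapq
import itertools

class TimerShard:
    """One shard: a heap of pending timers plus a cached minimum deadline."""
    def __init__(self):
        self._heap = []                      # (deadline, seq, callback)
        self._seq = itertools.count()        # tie-breaker for equal deadlines
        self.min_deadline = float("inf")

    def add(self, deadline, callback):
        heapq.heappush(self._heap, (deadline, next(self._seq), callback))
        self.min_deadline = self._heap[0][0]

    def compute_min_deadline(self):
        # Mirrors compute_min_deadline(): an empty shard reports "no deadline".
        return self._heap[0][0] if self._heap else float("inf")

class TimerList:
    def __init__(self, num_shards=8):
        self.shards = [TimerShard() for _ in range(num_shards)]

    def add(self, deadline, callback):
        # The deadline is hashed to select which shard receives the timer.
        self.shards[hash(deadline) % len(self.shards)].add(deadline, callback)

    def next_deadline(self):
        # The globally next deadline is the minimum over per-shard minimums.
        return min(s.compute_min_deadline() for s in self.shards)
```
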
/external/pytorch/test/distributed/_tensor/
test_redistribute.py
8 from torch.distributed._tensor.placement_types import Partial, Replicate, Shard
29 # 1) test shard -> replicate forward
44 shard_spec = [Shard(shard_dim)]
57 # 2) test shard -> replicate backward:
58 # should give gradient as shard
132 shard_spec = [Shard(shard_dim)]
133 # 1) test replicate -> shard forward
151 # 2) test replicate -> shard backward:
265 shard_spec = [Shard(shard_dim)]
287 # test partial to shard, trigger reduce_scatter
[all …]
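
test_redistribute.py exercises shard -> replicate and replicate -> shard transitions (an all-gather in one direction, a slice in the other, and reduce_scatter for partial -> shard). A hedged sketch of the public DTensor calls those tests rely on, assuming an already-initialized 4-rank process group and a 1-D device mesh:

```python
# Sketch only: assumes torch.distributed has been initialized (e.g. via torchrun)
# with 4 ranks and a backend such as gloo; run once per rank.
import torch
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed._tensor import distribute_tensor, Replicate, Shard

mesh = init_device_mesh("cpu", (4,))                    # 1-D device mesh over 4 ranks
full = torch.randn(8, 4)
sharded = distribute_tensor(full, mesh, [Shard(0)])     # each rank holds a 2x4 slice
replicated = sharded.redistribute(mesh, [Replicate()])  # all-gather back to a full 8x4 copy
```
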
test_utils.py
14 from torch.distributed.tensor.placement_types import _StridedShard, Replicate, Shard
38 # replicate, shard
39 placements2 = [Replicate(), Shard(0)]
46 # shard, shard
47 placements3 = [Shard(0), Shard(1)]
62 one_d_placements = [[Shard(0)], [Replicate()]]
65 # When the placements is [Shard(0)], we test for three different scenarios:
93 two_d_placements_options = [Shard(0), Shard(1), Replicate()]
133 # local shard shape is [2, 2]
135 placements = [_StridedShard(0, split_factor=tp_size), Shard(0)]
[all …]
test_dtensor.py
21 Shard,
62 placements = [Shard(0)]
92 dist_specs = [[Shard(0)], [Replicate()]]
150 shard0_spec = [Shard(0)]
157 shard1_spec = [Shard(1)]
176 placements = [Shard(0)]
217 shard_placement = Shard(0)
228 (Shard(0),),
243 shard_placement = Shard(0)
257 (Shard(0),),
[all …]
/external/pytorch/.github/workflows/
inductor-perf-test-nightly-aarch64.yml
62 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 1, num_shards: 9, runner: "linux.arm64.m…
63 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 2, num_shards: 9, runner: "linux.arm64.m…
64 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 3, num_shards: 9, runner: "linux.arm64.m…
65 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 4, num_shards: 9, runner: "linux.arm64.m…
66 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 5, num_shards: 9, runner: "linux.arm64.m…
67 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 6, num_shards: 9, runner: "linux.arm64.m…
68 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 7, num_shards: 9, runner: "linux.arm64.m…
69 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 8, num_shards: 9, runner: "linux.arm64.m…
70 …{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 9, num_shards: 9, runner: "linux.arm64.m…
71 …{ config: "inductor_timm_perf_cpu_aarch64", shard: 1, num_shards: 15, runner: "linux.arm64.m7g.me…
[all …]
inductor.yml
41 …{ config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-typ…
42 …{ config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-typ…
43 …{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outpu…
44 …{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outpu…
45 …{ config: "inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.labe…
46 …{ config: "inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.labe…
47 …{ config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.output…
48 …{ config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.output…
49 …{ config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-ty…
50 …{ config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outp…
[all …]
pull.yml
46 { config: "default", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
47 { config: "default", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
48 { config: "default", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
49 { config: "docs_test", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
50 { config: "jit_legacy", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
51 { config: "backwards_compat", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
52 { config: "distributed", shard: 1, num_shards: 2, runner: "am2.linux.2xlarge" },
53 { config: "distributed", shard: 2, num_shards: 2, runner: "am2.linux.2xlarge" },
83 { config: "default", shard: 1, num_shards: 1 },
94 { config: "default", shard: 1, num_shards: 1 },
[all …]
trunk.yml
46 { config: "default", shard: 1, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
47 { config: "default", shard: 2, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
48 { config: "default", shard: 3, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
49 { config: "default", shard: 4, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
50 { config: "default", shard: 5, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
74 { config: "default", shard: 1, num_shards: 1 },
86 { config: "default", shard: 1, num_shards: 1 },
99 { config: "default", shard: 1, num_shards: 1 },
111 { config: "default", shard: 1, num_shards: 1 },
122 { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
[all …]
inductor-cu124.yml
32 { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
33 { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
34 …{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" …
35 …{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
36 … { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
37 … { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
38 …{ config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidi…
39 …{ config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" …
40 …{ config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" …
41 …{ config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia…
[all …]
periodic.yml
48 { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
49 { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
50 { config: "jit_legacy", shard: 1, num_shards: 1, runner: "am2.linux.4xlarge.nvidia.gpu" },
71 { config: "default", shard: 1, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
72 { config: "default", shard: 2, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
73 { config: "default", shard: 3, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
74 { config: "default", shard: 4, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
75 { config: "default", shard: 5, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
76 { config: "deploy", shard: 1, num_shards: 1, runner: "am2.linux.4xlarge.nvidia.gpu" },
77 { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
[all …]
inductor-periodic.yml
30 …{ config: "dynamo_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu…
31 …{ config: "dynamo_eager_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu…
32 …{ config: "dynamo_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gp…
33 … { config: "dynamo_eager_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
34 … { config: "dynamo_eager_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
35 …{ config: "aot_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
36 …{ config: "aot_eager_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
37 …{ config: "aot_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" …
38 … { config: "aot_eager_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
39 … { config: "aot_eager_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
[all …]
/external/pytorch/torch/csrc/distributed/autograd/context/
container.cpp
114 auto& shard = getShard(context_id); in getOrCreateContext() local
115 std::lock_guard<std::mutex> guard(shard.lock); in getOrCreateContext()
116 auto it = shard.contexts.find(context_id); in getOrCreateContext()
117 if (it != shard.contexts.end()) { in getOrCreateContext()
122 shard.contexts in getOrCreateContext()
147 auto& shard = getShard(context_id); in newContext() local
148 std::lock_guard<std::mutex> guard(shard.lock); in newContext()
150 shard.contexts in newContext()
172 auto& shard = getShard(current_context_id_); in currentContext() local
173 std::lock_guard<std::mutex> guard(shard.lock); in currentContext()
[all …]
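
The autograd context container above selects a shard from the context id, takes only that shard's mutex, and then looks the context up in the shard's local map, so contention is limited to one stripe instead of a single global lock. A small Python analogue of that lock-striped container (illustrative, not the C++ code; the class name is hypothetical):

```python
import threading

class ShardedContextMap:
    """A map split into fixed shards, each guarded by its own mutex (lock striping)."""
    def __init__(self, num_shards=16):
        self._shards = [({}, threading.Lock()) for _ in range(num_shards)]

    def _get_shard(self, context_id):
        # Equivalent of getShard(context_id): the id selects one stripe.
        return self._shards[context_id % len(self._shards)]

    def get_or_create(self, context_id, factory):
        contexts, lock = self._get_shard(context_id)
        with lock:                               # only this shard's lock is held
            if context_id not in contexts:
                contexts[context_id] = factory()
            return contexts[context_id]
```
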
/external/cronet/stable/build/config/fuchsia/test/
README.md
4 Fuchsia tests hermetically. Tests start from `minimum.shard.test-cml` and add
11 #### archivist.shard.test-cml
15 #### chromium_test_facet.shard.test-cml
22 #### fonts.shard.test-cml
24 This shard runs an isolated font provider with fonts bundled into the fonts
27 #### test_fonts.shard.test-cml
29 (beyond that provided by `fonts.shard.test-cml`). This shard requires fonts to
36 The user of this shard must provide a directory `/pkg/test_fonts`, which must
42 #### mark_vmo_executable.shard.test-cml
46 #### minimum.shard.test-cml
[all …]
/external/cronet/tot/build/config/fuchsia/test/
README.md
4 Fuchsia tests hermetically. Tests start from `minimum.shard.test-cml` and add
11 #### archivist.shard.test-cml
15 #### chromium_test_facet.shard.test-cml
22 #### fonts.shard.test-cml
24 This shard runs an isolated font provider with fonts bundled into the fonts
27 #### test_fonts.shard.test-cml
29 (beyond that provided by `fonts.shard.test-cml`). This shard requires fonts to
36 The user of this shard must provide a directory `/pkg/test_fonts`, which must
42 #### mark_vmo_executable.shard.test-cml
46 #### minimum.shard.test-cml
[all …]
/external/angle/build/config/fuchsia/test/
README.md
4 Fuchsia tests hermetically. Tests start from `minimum.shard.test-cml` and add
11 #### archivist.shard.test-cml
15 #### chromium_test_facet.shard.test-cml
22 #### fonts.shard.test-cml
24 This shard runs an isolated font provider with fonts bundled into the fonts
27 #### test_fonts.shard.test-cml
29 (beyond that provided by `fonts.shard.test-cml`). This shard requires fonts to
36 The user of this shard must provide a directory `/pkg/test_fonts`, which must
42 #### mark_vmo_executable.shard.test-cml
46 #### minimum.shard.test-cml
[all …]
/external/pytorch/torch/distributed/_shard/sharded_tensor/
shard.py
11 class Shard: class
13 Container which holds the data for a shard as a Tensor and also
14 the associated metadata for that shard.
17 tensor(torch.Tensor): Local tensor for the shard.
19 The metadata for the shard, including offsets, lengths and device placement.
30 "Shard tensor size does not match with metadata.shard_lengths! "
31 f"Found shard tensor size: {list(self.tensor.size())}, "
40 f"Local shard tensor device does not match with local Shard's placement! "
41 f"Found local shard tensor device: {self.tensor.device}, "
42 f"local shard metadata placement device: {placement_device.device()}"
[all …]
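
The Shard class above wraps a local tensor together with its metadata and validates, at construction time, that the tensor's size matches the metadata's shard lengths (and, similarly, that its device matches the metadata's placement). A simplified sketch of that kind of post-init validation; `ShardMetadata` and `LocalShard` here are hypothetical stand-ins, not the torch classes, and only the size check is shown:

```python
from dataclasses import dataclass
import torch

@dataclass
class ShardMetadata:                 # hypothetical stand-in for the real metadata type
    shard_offsets: list
    shard_lengths: list
    placement: str                   # e.g. "rank:0/cpu"

@dataclass
class LocalShard:                    # hypothetical name; the class above is called Shard
    """Local tensor for one shard plus the metadata that describes it."""
    tensor: torch.Tensor
    metadata: ShardMetadata

    def __post_init__(self):
        # The device check in the real class follows the same pattern as this size check.
        if list(self.tensor.size()) != list(self.metadata.shard_lengths):
            raise ValueError(
                "Shard tensor size does not match with metadata.shard_lengths! "
                f"Found shard tensor size: {list(self.tensor.size())}, "
                f"metadata.shard_lengths: {self.metadata.shard_lengths}"
            )

shard = LocalShard(torch.zeros(2, 2), ShardMetadata([0, 0], [2, 2], "rank:0/cpu"))
```
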
/external/pytorch/torch/distributed/_shard/sharding_spec/
_internals.py
12 # For each dim of each shard, check if one shard resides on the other
13 # end of second shard with respect to that dim. As an example for a 2D
14 # shard, we would check if one shard is above or on the left of the
15 # other shard.
43 # For each dim of each shard, check if one shard resides on the other
44 # end of second shard with respect to that dim. As an example for a 2D
45 # shard, we would check if one shard is above or on the left of the
46 # other shard.
78 each shard.
120 objects representing each shard of the tensor.
[all …]
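
The comments in _internals.py describe the pairwise overlap check for sharding specs: for each dimension, if one shard starts at or past the other's end along that dimension (above or to the left in the 2-D case), the two shards cannot overlap. That check can be written directly from shard offsets and lengths; an illustrative Python version with a hypothetical function name:

```python
def shards_overlap(offsets_a, lengths_a, offsets_b, lengths_b) -> bool:
    """Return True if two n-dimensional shards overlap.

    For each dim, if shard A ends at or before shard B starts (or vice versa),
    the shards sit on opposite sides along that dim and cannot overlap."""
    for oa, la, ob, lb in zip(offsets_a, lengths_a, offsets_b, lengths_b):
        if oa + la <= ob or ob + lb <= oa:
            return False
    return True

# 2-D example: [0:4, 0:4] vs [4:8, 0:4] share an edge but no cells.
print(shards_overlap([0, 0], [4, 4], [4, 0], [4, 4]))  # False
print(shards_overlap([0, 0], [4, 4], [2, 2], [4, 4]))  # True
```
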
/external/rust/android-crates-io/crates/sharded-slab/src/
shard.rs
37 pub(crate) struct Shard<T, C: cfg::Config> { struct
38 /// The shard's parent thread ID.
42 /// These are only ever accessed from this shard's thread, so they are
46 /// The shared state for each page in this shard.
59 struct Ptr<T, C: cfg::Config>(AtomicPtr<alloc::Track<Shard<T, C>>>); argument
64 // === impl Shard ===
66 impl<T, C> Shard<T, C> implementation
102 impl<T, C> Shard<Option<T>, C> implementation
106 /// Remove an item on the shard's local thread.
118 /// Remove an item, while on a different thread from the shard's local thread.
[all …]
/external/tensorflow/tensorflow/python/data/kernel_tests/
shard_test.py
15 """Tests for `tf.data.Dataset.shard()`."""
31 dataset = dataset_ops.Dataset.range(10).shard(5, 2)
38 dataset = dataset_ops.Dataset.zip((dataset_a, dataset_b)).shard(5, 2)
43 dataset = dataset_ops.Dataset.range(10).shard(5, 0)
49 dataset = dataset_ops.Dataset.range(10).shard(5, 7)
55 dataset = dataset_ops.Dataset.range(10).shard(5, -3)
61 dataset = dataset_ops.Dataset.range(10).shard(-3, 1)
67 dataset = dataset_ops.Dataset.range(10).shard(0, 1)
72 dataset = dataset_ops.Dataset.range(1).shard(5, 2)
77 dataset = dataset_ops.Dataset.range(10).shard(7, 5)
[all …]
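
shard_test.py covers `tf.data.Dataset.shard(num_shards, index)`, which keeps every element whose position modulo `num_shards` equals `index`, and which rejects negative, out-of-range, or zero arguments. A short usage example (requires TensorFlow installed):

```python
import tensorflow as tf

# shard(num_shards, index) keeps elements whose position % num_shards == index.
dataset = tf.data.Dataset.range(10).shard(num_shards=5, index=2)
print(list(dataset.as_numpy_iterator()))   # [2, 7]

# Invalid arguments such as shard(5, 7), shard(5, -3), or shard(0, 1) raise an
# error, which is what the error-case tests above verify.
```
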
/external/tensorflow/tensorflow/core/util/
work_sharder.h
27 // allows you to specify the strategy for choosing shard sizes, including using
28 // a fixed shard size. Use this function only if you want to manually cap
33 // total - 1. Each shard contains 1 or more units of work and the
34 // total cost of each shard is roughly the same. The calling thread and the
35 // "workers" are used to compute each shard (calling work(start,
41 // many shards and CPU time will be dominated by per-shard overhead, such as
47 // limit), i.e., [start, limit) is a shard.
50 // therefore, Shard() often limits the maximum parallelism. Each
52 // call SetMaxParallelism() so that all Shard() calls later limits the
59 void Shard(int max_parallelism, thread::ThreadPool* workers, int64_t total,
[all …]
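
work_sharder.h describes Shard(): split the range [0, total) into pieces of roughly equal cost, run work(start, limit) for each piece on the thread pool, and avoid creating so many shards that per-shard overhead dominates. A rough Python analogue of that idea using a thread pool (a sketch only, not the TensorFlow API; the cost_per_unit heuristic is omitted and the split is simply by element count):

```python
from concurrent.futures import ThreadPoolExecutor

def shard(max_parallelism: int, total: int, work) -> None:
    """Split [0, total) into roughly equal half-open ranges and run
    work(start, limit) for each range on a pool of worker threads."""
    num_shards = max(1, min(max_parallelism, total))
    per_shard = (total + num_shards - 1) // num_shards    # ceil division
    with ThreadPoolExecutor(max_workers=num_shards) as pool:
        futures = []
        for start in range(0, total, per_shard):
            limit = min(start + per_shard, total)         # [start, limit) is one shard
            futures.append(pool.submit(work, start, limit))
        for f in futures:
            f.result()                                    # propagate worker exceptions

# Example: sum the squares of 0..99 across parallel shards.
partials = []
shard(4, 100, lambda s, l: partials.append(sum(i * i for i in range(s, l))))
print(sum(partials))   # 328350
```
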
