
Searched +full:max +full:- +full:parallel (Results 1 – 25 of 767) sorted by relevance


/third_party/curl/docs/cmdline-opts/
parallel-max.md
1 ---
3 SPDX-License-Identifier: curl
4 Long: parallel-max
6 Help: Maximum concurrency for parallel transfers
10 See-also:
11 - parallel
13 - --parallel-max 100 -Z $URL ftp://example.com/
14 ---
16 # `--parallel-max`
18 When asked to do parallel transfers, using --parallel, this option controls
[all …]
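The option description is truncated above. As a point of reference, here is a minimal libcurl sketch of the same concurrency cap; `CURLMOPT_MAX_TOTAL_CONNECTIONS` is a related multi-interface knob, not necessarily the tool's exact mechanism, and the URLs are placeholders:

```cpp
// Hedged sketch, not curl's implementation: capping concurrent transfers
// with libcurl's multi interface, roughly what --parallel-max 100 asks
// the curl tool to do. Easy-handle cleanup omitted for brevity.
#include <curl/curl.h>

int main() {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURLM *multi = curl_multi_init();
  // Analogous to --parallel-max 100 (caps concurrent connections).
  curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 100L);

  const char *urls[] = {"https://example.com/a", "https://example.com/b"};
  for (const char *u : urls) {
    CURL *easy = curl_easy_init();
    curl_easy_setopt(easy, CURLOPT_URL, u);
    curl_multi_add_handle(multi, easy);
  }

  int running = 0;
  do {  // libcurl schedules the transfers within the cap
    curl_multi_perform(multi, &running);
    curl_multi_poll(multi, nullptr, 0, 1000, nullptr);
  } while (running > 0);

  curl_multi_cleanup(multi);
  curl_global_cleanup();
}
```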
parallel-immediate.md
1 ---
3 SPDX-License-Identifier: curl
4 Long: parallel-immediate
5 Help: Do not wait for multiplexing (with --parallel)
10 See-also:
11 - parallel
12 - parallel-max
14 - --parallel-immediate -Z $URL -o file1 $URL -o file2
15 ---
17 # `--parallel-immediate`
[all …]
Makefile.inc
21 # SPDX-License-Identifier: curl
48 abstract-unix-socket.md \
49 alt-svc.md \
52 aws-sigv4.md \
54 ca-native.md \
57 cert-status.md \
58 cert-type.md \
61 compressed-ssh.md \
64 connect-timeout.md \
65 connect-to.md \
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/parallel/allreduce_fusion/
allreduce_node.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/allreduce_fusion/allreduce_node.h"
19 #include "frontend/parallel/tensor_layout/tensor_layout.h"
23 namespace parallel { namespace
33 Status AllreduceNode::AddPrev(const AllreduceNodePtr &prev_node, double dist, double *max) { in AddPrev() argument
43 double add_dist = prev_node->depend_feat_size() + dist; in AddPrev()
45 if (depend_feat_size_ > *max) { in AddPrev()
46 *max = depend_feat_size_; in AddPrev()
54 ele->AddDependFeatSize(add_dist); in AddPrev()
55 if (ele->depend_feat_size() > *max) { in AddPrev()
[all …]
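A minimal sketch of the max-tracking pattern visible in the AddPrev() fragment above, using simplified stand-in types rather than the actual MindSpore classes (the real code also validates inputs and walks the dependency graph more carefully):

```cpp
// Hedged sketch: accumulate a predecessor's depend_feat_size plus the
// edge distance, and keep *max as the running graph-wide maximum, as
// the AddPrev() fragment above appears to do. Simplified types.
#include <memory>
#include <vector>

struct Node {
  double depend_feat_size = 0.0;
  std::vector<std::shared_ptr<Node>> succs;  // immediate successors

  void AddPrev(const std::shared_ptr<Node> &prev, double dist, double *max) {
    const double add_dist = prev->depend_feat_size + dist;
    depend_feat_size += add_dist;
    if (depend_feat_size > *max) {
      *max = depend_feat_size;
    }
    // Propagate the increase to immediate successors, updating the max.
    for (auto &succ : succs) {
      succ->depend_feat_size += add_dist;
      if (succ->depend_feat_size > *max) {
        *max = succ->depend_feat_size;
      }
    }
  }
};
```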
allreduce_node.h
2 * Copyright 2020-2023 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
25 #include "frontend/parallel/status.h"
28 namespace parallel {
44 // max is the current max depend_feat_size of the AllreduceGraph
45 Status AddPrev(const AllreduceNodePtr &prev_node, double dist, double *max);
63 } // namespace parallel
allreduce_graph.cc
2 * Copyright 2020-2023 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/allreduce_fusion/allreduce_graph.h"
21 #include "frontend/parallel/allreduce_fusion/allreduce_node.h"
22 #include "frontend/parallel/ops_info/ops_utils.h"
26 namespace parallel { namespace
31 MS_LOG(INFO) << "node: " << node->DebugString() << " has already been added!"; in AddNode()
36 arnode = cnode_arnode_pair->second; in AddNode()
42 if (arnode->Init(node) != SUCCESS) { in AddNode()
46 if (arnode->AddPara(para) != SUCCESS) { in AddNode()
[all …]
allreduce_graph.h
2 * Copyright 2020-2023 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
27 #include "frontend/parallel/allreduce_fusion/allreduce_node.h"
28 #include "frontend/parallel/status.h"
31 namespace parallel {
66 double max() const { return max_; } in max() function
82 } // namespace parallel
/third_party/skia/m133/src/pathops/
SkDLineIntersection.cpp
4 * Use of this source code is governed by a BSD-style license that can be
17 void SkIntersections::cleanUpParallelLines(bool parallel) { in cleanUpParallelLines() argument
21 if (fUsed == 2 && !parallel) { in cleanUpParallelLines()
48 SkDVector aLen = a[1] - a[0]; in intersectRay()
49 SkDVector bLen = b[1] - b[0]; in intersectRay()
54 byLen * axLen - ayLen * bxLen == 0 ( == denom ) in intersectRay()
56 double denom = bLen.fY * aLen.fX - aLen.fY * bLen.fX; in intersectRay()
59 SkDVector ab0 = a[0] - b[0]; in intersectRay()
60 double numerA = ab0.fY * bLen.fX - bLen.fY * ab0.fX; in intersectRay()
61 double numerB = ab0.fY * aLen.fX - aLen.fY * ab0.fX; in intersectRay()
[all …]
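The arithmetic in intersectRay() is the standard 2D cross-product test: the denominator is the cross product of the two direction vectors, and a zero denominator means the lines are parallel. A self-contained sketch with simplified types (not Skia's SkDVector; the real code also handles near-zero denominators and coincident lines via cleanUpParallelLines):

```cpp
// Hedged sketch of the cross-product intersection math above: for rays
// P(s) = a0 + s*aV and Q(t) = b0 + t*bV, denom == aV x bV, and
// s = numerA / denom. Exact-zero test only; Skia's version does more.
#include <cstdio>
#include <optional>

struct Vec2 { double x, y; };
static double cross(Vec2 u, Vec2 v) { return u.x * v.y - u.y * v.x; }

std::optional<double> intersectRay(Vec2 a0, Vec2 aV, Vec2 b0, Vec2 bV) {
  double denom = cross(aV, bV);         // bV.y*aV.x - aV.y*bV.x in the source
  if (denom == 0) return std::nullopt;  // parallel (possibly coincident)
  Vec2 ab0{a0.x - b0.x, a0.y - b0.y};   // a[0] - b[0]
  double numerA = ab0.y * bV.x - bV.y * ab0.x;
  return numerA / denom;                // parameter s along ray a
}

int main() {
  // x-axis vs the vertical line x = 0: intersection at the origin, s = 0.
  if (auto s = intersectRay({0, 0}, {1, 0}, {0, -1}, {0, 1})) {
    std::printf("s = %f\n", *s);
  }
}
```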
/third_party/skia/src/pathops/
SkDLineIntersection.cpp
4 * Use of this source code is governed by a BSD-style license that can be
12 void SkIntersections::cleanUpParallelLines(bool parallel) { in cleanUpParallelLines() argument
16 if (fUsed == 2 && !parallel) { in cleanUpParallelLines()
43 SkDVector aLen = a[1] - a[0]; in intersectRay()
44 SkDVector bLen = b[1] - b[0]; in intersectRay()
49 byLen * axLen - ayLen * bxLen == 0 ( == denom ) in intersectRay()
51 double denom = bLen.fY * aLen.fX - aLen.fY * bLen.fX; in intersectRay()
54 SkDVector ab0 = a[0] - b[0]; in intersectRay()
55 double numerA = ab0.fY * bLen.fX - bLen.fY * ab0.fX; in intersectRay()
56 double numerB = ab0.fY * aLen.fX - aLen.fY * ab0.fX; in intersectRay()
[all …]
/third_party/vk-gl-cts/external/openglcts/docs/specs/
CTS_ARB_parallel_shader_compile.txt
28 - ARB_parallel_shader_compile extension specification,
29 - OpenGL 4.5 (CoreProfile) specification.
45 - MAX_SHADER_COMPILER_THREADS_ARB
49 Max Shader Compile Threads Test
54 - 0 (non-parallel compilation)
55 - 0xFFFFFFFF (maximum threads parallel compilation)
64 - Set max shader compiler threads to 0.
71 - Set max shader compiler threads to 8.
84 - Initial version;
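A sketch of the GL calls such a test exercises; the function and enum names come from the ARB_parallel_shader_compile extension itself, and the sketch assumes a loader (GLEW here) has resolved the entry points:

```cpp
// Hedged sketch of the ARB_parallel_shader_compile knobs the test pokes.
// Assumes glewInit() has run and the extension is present.
#include <GL/glew.h>

void exerciseParallelCompile(GLuint shader) {
  glMaxShaderCompilerThreadsARB(0u);           // non-parallel compilation
  glMaxShaderCompilerThreadsARB(0xFFFFFFFFu);  // ask for the driver maximum
  glMaxShaderCompilerThreadsARB(8u);           // a specific thread count

  GLint threads = 0;
  glGetIntegerv(GL_MAX_SHADER_COMPILER_THREADS_ARB, &threads);

  // With parallel compilation, completion can be polled without blocking
  // on glGetShaderiv(GL_COMPILE_STATUS).
  GLint done = GL_FALSE;
  glGetShaderiv(shader, GL_COMPLETION_STATUS_ARB, &done);
}
```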
/third_party/mesa3d/src/freedreno/ci/
freedreno-a420-skips.txt
2 # reliable to be run in parallel with other tests due to CPU-side timing.
3 dEQP-GLES[0-9]*.functional.flush_finish.*
6 dEQP-GLES31.functional.ssbo.layout.random.all_shared_buffer.36
9 KHR-GLES31.core.shader_image_load_store.basic-allFormats-store-fs
16 spec@glsl-1.50
17 spec@glsl-4.*
24 spec@!opengl 1.2@tex3d-maxsize
27 glx@glx-multithread-texture
28 spec@!opengl 1.1@draw-sync
29 spec@glsl-1.30@execution@texelfetch fs sampler2d 1x281-501x281
[all …]
/third_party/curl/docs/
PARALLEL-TRANSFERS.md
1 <!--
4 SPDX-License-Identifier: curl
5 -->
7 # Parallel transfers
10 parallel.
12 ## -Z, --parallel
15 at the same time. It does up to `--parallel-max` concurrent transfers, with a
20 The progress meter that is displayed when doing parallel transfers is
42 72 -- 37.9G 0 101 30 23 0:00:55 0:00:34 0:00:22 2752M
options-in-versions
13 --abstract-unix-socket 7.53.0
14 --alt-svc 7.64.1
15 --anyauth 7.10.6
16 --append (-a) 4.8
17 --aws-sigv4 7.75.0
18 --basic 7.10.6
19 --ca-native 8.2.0
20 --cacert 7.5
21 --capath 7.9.8
22 --cert (-E) 5.0
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/parallel/_transformer/
loss.py
7 # http://www.apache.org/licenses/LICENSE-2.0
16 Parallel Loss for the Parallel Training.
21 from mindspore.parallel import set_algo_parameters
28 from mindspore.parallel._utils import _get_parallel_mode, _is_sharding_propagation
30 from mindspore.parallel._utils import _get_device_num, _get_pipeline_stages
33 from mindspore.parallel._transformer.layers import _check_input_dtype
34 from mindspore.parallel._transformer.op_parallel_config import default_dpmp_config, OpParallelConfig
48 parallel_config (OpParallelConfig): The parallel configure. Default `default_dpmp_config`,
52 - **logits** (Tensor) - Tensor of shape (N, C). Data type must be float16 or float32. The output l…
71 self.max = P.ArgMaxWithValue(axis=-1, keep_dims=True).shard(
[all …]
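The `ArgMaxWithValue(axis=-1, keep_dims=True)` at the end of the snippet is consistent with the usual max-subtraction stabilization in softmax cross entropy. A plain sketch of that trick (an inference from the fragment, not MindSpore's sharded implementation):

```cpp
// Hedged sketch of max-subtraction softmax stabilization: shifting the
// logits by their maximum leaves softmax unchanged but keeps exp() from
// overflowing. Plain C++, not the sharded MindSpore ops.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<double> stableSoftmax(const std::vector<double> &logits) {
  const double mx = *std::max_element(logits.begin(), logits.end());
  std::vector<double> out(logits.size());
  double sum = 0.0;
  for (std::size_t i = 0; i < logits.size(); ++i) {
    out[i] = std::exp(logits[i] - mx);  // same ratios, bounded exponent
    sum += out[i];
  }
  for (double &v : out) v /= sum;
  return out;
}
```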
/third_party/mindspore/mindspore-src/source/tests/ut/python/parallel/
test_scatter_ops.py
7 # http://www.apache.org/licenses/LICENSE-2.0
22 from parallel.utils.utils import ParallelValidator
25 "Min": P.ScatterMin(), "Max": P.ScatterMax(), "Sub": P.ScatterSub()}
58 Feature: test scatter ops auto parallel
76 Feature: test scatter ops auto parallel
94 Feature: test scatter ops auto parallel
106 net = Net(input_shape, indices_shape, updates_shape, strategy1, strategy2, "Max")
112 Feature: test scatter ops auto parallel
129 'ScatterAdd-0': ['input', 'Minimum-0', 'Mul-0'],
130 'Mul-0': ['_GetTensorSlice-1', 'Reshape-0'],
[all …]
/third_party/mesa3d/src/nouveau/compiler/nak/
to_cssa.rs
2 // SPDX-License-Identifier: MIT
18 fn new(a: I, b: I) -> Self { in new()
32 fn next(&mut self) -> Option<<I as Iterator>::Item> { in next()
48 fn size_hint(&self) -> (usize, Option<usize>) { in size_hint()
80 fn new(live: &'a SimpleLiveness) -> Self { in new()
93 // Set it to usize::MAX for now. We'll update later in add_ssa()
94 if self.ssa_node.insert(ssa, usize::MAX).is_none() { in add_ssa()
97 set: usize::MAX, in add_ssa()
109 let old = self.phi_node_file.insert(phi, (usize::MAX, file)); in add_phi_dst()
113 set: usize::MAX, in add_phi_dst()
[all …]
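The `usize::MAX` insertions above act as a sentinel for "merge set not assigned yet", patched once the real set index is known. A simplified C++ stand-in for that pattern (not the nak data structures):

```cpp
// Hedged sketch of the sentinel pattern in to_cssa.rs: register a value
// with its set index at SIZE_MAX ("to be filled in later") and update it
// once the real merge set exists. Simplified stand-in types.
#include <cstddef>
#include <cstdint>
#include <unordered_map>

constexpr std::size_t kUnassigned = SIZE_MAX;

struct MergeSets {
  std::unordered_map<std::uint32_t, std::size_t> node_set;  // ssa -> set

  void addSsa(std::uint32_t ssa) {
    node_set.emplace(ssa, kUnassigned);  // set it to MAX for now
  }
  void assign(std::uint32_t ssa, std::size_t set) { node_set[ssa] = set; }
  bool assigned(std::uint32_t ssa) const {
    auto it = node_set.find(ssa);
    return it != node_set.end() && it->second != kUnassigned;
  }
};
```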
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/parallel/dynamic_shape/
dynamic_shape.h
8 * http://www.apache.org/licenses/LICENSE-2.0
31 #include "frontend/parallel/step_parallel.h"
32 #include "frontend/parallel/graph_util/generate_graph.h"
36 namespace parallel {
38 int64_t max = 1; member
69 } // namespace parallel
dynamic_shape.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/dynamic_shape/dynamic_shape.h"
38 namespace parallel { namespace
42 if (int_s->is_const()) { // static shape element in GetSymbolInfo()
43 tmp.max = int_s->value(); in GetSymbolInfo()
44 tmp.min = int_s->value(); in GetSymbolInfo()
45 tmp.divisor = int_s->value(); in GetSymbolInfo()
48 tmp.max = int_s->range_max(); in GetSymbolInfo()
49 tmp.min = int_s->range_min(); in GetSymbolInfo()
50 tmp.divisor = int_s->divisor(); in GetSymbolInfo()
[all …]
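The logic in GetSymbolInfo() reads directly off the fragment: a constant dimension collapses min, max, and divisor to one value, while a dynamic one keeps its symbolic range. A condensed sketch with stand-in types (SymbolInfo/IntSymbol here are simplifications, not the MindSpore classes):

```cpp
// Hedged sketch of GetSymbolInfo() above: static shape elements collapse
// min/max/divisor to the constant value; dynamic ones keep their range.
#include <cstdint>

struct SymbolInfo { int64_t max = 1, min = 1, divisor = 1; };
struct IntSymbol {
  bool is_const;
  int64_t value, range_min, range_max, div;
};

SymbolInfo GetSymbolInfo(const IntSymbol &s) {
  SymbolInfo tmp;
  if (s.is_const) {  // static shape element: a single value
    tmp.max = tmp.min = tmp.divisor = s.value;
  } else {           // dynamic element: symbolic range plus divisor
    tmp.max = s.range_max;
    tmp.min = s.range_min;
    tmp.divisor = s.div;
  }
  return tmp;
}
```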
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/parallel/ops_info/
quant_info.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
16 #include "frontend/parallel/ops_info/quant_info.h"
21 #include "frontend/parallel/dynamic_creator.h"
22 #include "frontend/parallel/graph_util/generate_graph.h"
25 namespace parallel { namespace
34 …MS_LOG(ERROR) << name_ << ": only support that both shape of min and max are 1, but the shape of m… in GetAttrs()
35 << inputs_shape_[1] << ", and the shape of max is " << inputs_shape_[2]; in GetAttrs()
40 …MS_LOG(ERROR) << name_ << ": only support that both shape of min and max are 1, but the shape of m… in GetAttrs()
41 << inputs_shape_[1] << ", and the shape of max is " << inputs_shape_[2]; in GetAttrs()
57 Strategies strategies = strategy_->GetInputDim(); in InferDevMatrixShape()
[all …]
virtual_dataset_info.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/ops_info/virtual_dataset_info.h"
25 #include "frontend/parallel/device_manager.h"
26 #include "frontend/parallel/device_matrix.h"
27 #include "frontend/parallel/dynamic_creator.h"
28 #include "frontend/parallel/step_parallel.h"
33 namespace parallel { namespace
43 *squashed_stra = stra->GetInputDim(); in GetSquashedStrategyAndShape()
45 if (squashed_stra->empty()) { in GetSquashedStrategyAndShape()
50 if (!stra->HasTupleInTupleStrategy()) { in GetSquashedStrategyAndShape()
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/_extends/graph_kernel/model/
graph_parallel.py
1 # Copyright 2021-2024 Huawei Technologies Co., Ltd
7 # http://www.apache.org/licenses/LICENSE-2.0
15 """Cost model for parallel fusion"""
64 const_size = max((self.prod(op.output.shape) for op in self.dom_op))
65 const_size = (const_size + self.MAX_NUM_THREADS -
69 total_block = (const_size + self.MAX_NUM_THREADS -
74 waves = (total_block + self.MAX_BLOCK - 1) // self.MAX_BLOCK
87 raise RuntimeError("Parallel fusion does not support multiple reduce op now.")
90 raise RuntimeError("Parallel fusion does not find a reduce op.")
100 block_x = (total_space // red_space + thread_y - 1) // thread_y
[all …]
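The repeated `(n + d - 1) // d` in the cost model is ceiling division: blocks needed to cover the elements, then waves needed to cover the blocks. A worked sketch of that arithmetic (the constants are illustrative, not MindSpore's actual values):

```cpp
// Hedged sketch of the cost model's block/wave arithmetic: the
// (n + d - 1) / d pattern rounds up. Constants are illustrative only.
#include <cstdio>

constexpr long kMaxNumThreads = 1024;  // assumed threads per block
constexpr long kMaxBlock = 256;        // assumed blocks per wave

long ceilDiv(long n, long d) { return (n + d - 1) / d; }

int main() {
  long elements = 1 << 20;                                // 1,048,576
  long total_block = ceilDiv(elements, kMaxNumThreads);   // 1024 blocks
  long waves = ceilDiv(total_block, kMaxBlock);           // 4 waves
  std::printf("blocks=%ld waves=%ld\n", total_block, waves);
}
```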
/third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/_extends/parallel_compile/akg_compiler/
akg_process.py
1 # Copyright 2020-2022 Huawei Technologies Co., Ltd
7 # http://www.apache.org/licenses/LICENSE-2.0
80 raise FileNotFoundError("Can not compile non-existing file \"{}\"".format(info_path))
109 if -1 in shape or -2 in shape:
128 raise FileNotFoundError(f"Can not compile non-existing file \"{info_path}\"")
143 create Akg V2 Parallel Compiler object
152 """base class for akg kernel parallel process"""
158 wait_time: int. max time the function blocked
202 """akg kernel parallel process"""
208 wait_time: int. max time the function blocked
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/
histogram.cc
2 * Copyright 2022-2024 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
37 // when input data size is more than kParallelDataNum, use Parallel func
58 …static_cast<int64_t>((elt - static_cast<InterType>(leftmost_edge)) / step * static_cast<InterType>…
72 ctx.GetOpType().c_str(), x->GetDataSize(), y->GetDataSize()); in ParamCheck()
80 auto x_data = reinterpret_cast<T *>(x->GetData()); in DoCompute()
81 auto y_data = reinterpret_cast<int32_t *>(y->GetData()); in DoCompute()
82 int64_t x_num = x->NumElements(); in DoCompute()
83 int32_t y_num = y->NumElements(); in DoCompute()
86 min_attr = ctx.GetAttr("min")->GetFloat(); in DoCompute()
[all …]
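The truncated expression computes a fixed-width bin index as (x - leftmost_edge) / step. A self-contained serial sketch of that binning with simplified types, omitting the kernel's parallel dispatch above the kParallelDataNum threshold mentioned in the snippet:

```cpp
// Hedged sketch of fixed-width histogram binning like the kernel above:
// bin = (x - lo) / width, with x == hi clamped into the last bin.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int32_t> histogram(const std::vector<float> &x,
                               float lo, float hi, int32_t bins) {
  std::vector<int32_t> counts(bins, 0);
  const float width = (hi - lo) / static_cast<float>(bins);
  for (float v : x) {
    if (v < lo || v > hi) continue;  // out-of-range values are dropped
    auto b = static_cast<int64_t>((v - lo) / width);
    if (b >= bins) b = bins - 1;     // v == hi lands in the last bin
    ++counts[static_cast<std::size_t>(b)];
  }
  return counts;
}
```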
/third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/ops/operations/
comm_ops.py
1 # Copyright 2020-2023 Huawei Technologies Co., Ltd
7 # http://www.apache.org/licenses/LICENSE-2.0
35 - SUM: ReduceOp.SUM.
36 - MAX: ReduceOp.MAX.
37 - MIN: ReduceOp.MIN.
38 - PROD: ReduceOp.PROD.
40 There are four kinds of operation options, "SUM", "MAX", "MIN", and "PROD".
42 - SUM: Take the sum.
43 - MAX: Take the maximum.
44 - MIN: Take the minimum.
[all …]
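The elementwise semantics of the four modes, shown as a local fold over per-rank values; the real op runs collectively across devices, and this C++ stand-in is not the MindSpore Python API:

```cpp
// Hedged sketch of what each ReduceOp computes: after AllReduce, every
// rank holds this fold of the per-rank values. Local stand-in for the
// distributed collective.
#include <algorithm>
#include <cstddef>
#include <vector>

enum class ReduceOp { SUM, MAX, MIN, PROD };

double reduce(const std::vector<double> &per_rank, ReduceOp op) {
  double acc = per_rank.front();
  for (std::size_t i = 1; i < per_rank.size(); ++i) {
    switch (op) {
      case ReduceOp::SUM:  acc += per_rank[i]; break;
      case ReduceOp::MAX:  acc = std::max(acc, per_rank[i]); break;
      case ReduceOp::MIN:  acc = std::min(acc, per_rank[i]); break;
      case ReduceOp::PROD: acc *= per_rank[i]; break;
    }
  }
  return acc;
}
```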
/third_party/rust/crates/libc/.github/workflows/
bors.yml
6 - auto-libc
7 - try
13 … actions: write # to cancel workflows (rust-lang/simpleinfra/github-actions/cancel-outdated-builds)
17 runs-on: ubuntu-22.04
19 fail-fast: true
22 i686-unknown-linux-gnu,
23 x86_64-unknown-linux-gnu,
26 - uses: rust-lang/simpleinfra/github-actions/cancel-outdated-builds@master
29 - uses: actions/checkout@v3
30 - name: Setup Rust toolchain
[all …]
