
Searched +full:- +full:- +full:parallel (Results 1 – 25 of 1218) sorted by relevance


/third_party/mindspore/mindspore-src/source/tests/ut/cpp/pipeline/parse/
parallel_if.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
62 …tensor::TensorPtr x_tensor = std::make_shared<tensor::Tensor>(kFloat32->type_id(), std::vector<int… in CheckParallelIfTransform()
63 …tensor::TensorPtr y_tensor = std::make_shared<tensor::Tensor>(kFloat32->type_id(), std::vector<int… in CheckParallelIfTransform()
70 pipeline::AbstractAnalyze(res1->engine(), basic_graph, args_spec_list, res1->is_load()); in CheckParallelIfTransform()
71 auto new_basic_graph = pipeline::ProgramSpecialize(res1->engine(), basic_graph, result.context); in CheckParallelIfTransform()
74 … result = pipeline::AbstractAnalyze(res2->engine(), manual_graph, args_spec_list, res2->is_load()); in CheckParallelIfTransform()
75 … auto new_manual_graph = pipeline::ProgramSpecialize(res2->engine(), manual_graph, result.context); in CheckParallelIfTransform()
94 const auto &node_users = manager->node_users(); in CheckParallelIfTransformationCount()
95 for (const auto &node : manager->all_nodes()) { in CheckParallelIfTransformationCount()
99 ASSERT_EQ(switch_cnode_user_iter->second.size(), 1); in CheckParallelIfTransformationCount()
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/parallel/pass/
label_micro_interleaved_index.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/pass/label_micro_interleaved_index.h"
27 #include "frontend/parallel/step_parallel.h"
30 namespace parallel { namespace
39 if (!node->isa<CNode>()) { in IsBpropNode()
42 return node->fullname_with_scope().find(kGradientsFlag) == 0; in IsBpropNode()
46 int64_t pipeline_micro = -1) { in SpreadMicroInterleavedIndexForForwardCommNodes()
53 auto cnode_inputs = cnode->inputs(); in SpreadMicroInterleavedIndexForForwardCommNodes()
66 auto input_cnode = input->cast<CNodePtr>(); in SpreadMicroInterleavedIndexForForwardCommNodes()
67 if (input_cnode->HasAttr(MICRO_INTERLEAVED_TAG) || input_cnode->HasAttr(INTERLEAVED_NUM)) { in SpreadMicroInterleavedIndexForForwardCommNodes()
[all …]
label_fine_grained_interleaved_index.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/pass/label_fine_grained_interleaved_index.h"
27 #include "frontend/parallel/step_parallel.h"
28 #include "frontend/parallel/step_parallel_utils.h"
31 namespace parallel { namespace
43 return !(cnode->HasPrimalAttr(kPrimalAttrForwardUniqueId) || cnode->HasAttr(kAttrDuplicated)); in IsForwardNode()
48 if (!node->isa<CNode>()) { in IsBpropNode()
51 return node->fullname_with_scope().find(kGradientsFlag) == 0; in IsBpropNode()
58 auto func_graph = cnode->func_graph(); in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
60 auto manager = func_graph->manager(); in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
[all …]
overlap_opt_shard_in_pipeline.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/pass/overlap_opt_shard_in_pipeline.h"
26 #include "frontend/parallel/ops_info/ops_utils.h"
27 #include "frontend/parallel/device_manager.h"
29 #include "frontend/parallel/step_parallel_utils.h"
34 namespace parallel { namespace
42 auto allgather_instance_name = GetCNodePrimitive(node->cast<CNodePtr>())->instance_name(); in is_allgather_comm_ops()
43 if (allgather_instance_name.find(parallel::PARALLEL_OPTIMIZER) == std::string::npos) { in is_allgather_comm_ops()
54 auto recv_node = node->cast<CNodePtr>(); in is_first_receive()
55 if (recv_node->HasPrimalAttr(kPrimalAttrForwardNodeName)) { in is_first_receive()
[all …]
slice_activation_in_cell_share_recompute.cc
2 * Copyright 2023-2024 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/pass/slice_activation_in_cell_share_recompute.h"
21 #include "frontend/parallel/step_parallel.h"
22 #include "frontend/parallel/step_parallel_utils.h"
23 #include "frontend/parallel/graph_util/graph_utils.h"
24 #include "frontend/parallel/tensor_layout/construct_operator.h"
30 namespace parallel { namespace
32 CNodePtr CreateStridedSliceCNode(const parallel::Shape &begin, const parallel::Shape &end, in CreateStridedSliceCNode()
33 const parallel::Shape &strides, const AnfNodePtr &node) { in CreateStridedSliceCNode()
[all …]
full_micro_interleaved_order_control.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/pass/full_micro_interleaved_order_control.h"
28 #include "frontend/parallel/step_parallel.h"
31 namespace parallel { namespace
40 if (!node->isa<CNode>()) { in IsBpropNode()
43 return node->fullname_with_scope().find(kGradientsFlag) == 0; in IsBpropNode()
49 if (prim1->type_name() != prim2->type_name()) { in CheckCommNodeEqual()
53 if (!prim1->HasAttr(parallel::GROUP) || !prim2->HasAttr(parallel::GROUP)) { in CheckCommNodeEqual()
56 auto group1 = GetValue<std::string>(prim1->GetAttr(parallel::GROUP)); in CheckCommNodeEqual()
57 auto group2 = GetValue<std::string>(prim2->GetAttr(parallel::GROUP)); in CheckCommNodeEqual()
[all …]
merge_cast_opt.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
17 #include "frontend/parallel/pass/merge_cast_opt.h"
28 #include "frontend/parallel/ops_info/ops_utils.h"
29 #include "frontend/parallel/step_parallel_utils.h"
30 #include "frontend/parallel/pass/pass_utils.h"
33 namespace parallel { namespace
39 if (!parallel::ParallelContext::GetInstance()->enable_fine_grained_micro_interleaved()) { in InsertMakeTupleInput()
42 auto cnode = node->cast<CNodePtr>(); in InsertMakeTupleInput()
43 if (cnode->HasAttr(parallel::FINE_GRAINED_INTERLEAVED_TAG)) { in InsertMakeTupleInput()
44 auto tag = GetValue<size_t>(cnode->GetAttr(parallel::FINE_GRAINED_INTERLEAVED_TAG)); in InsertMakeTupleInput()
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/optimizer/
slice_activation_in_recompute.cc
2 * Copyright 2020-2024 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
28 #include "frontend/parallel/tensor_layout/construct_operator.h"
29 #include "frontend/parallel/graph_util/graph_utils.h"
30 #include "frontend/parallel/step_parallel.h"
38 CNodePtr CreateStridedSliceCNode(const parallel::Shape &begin, const parallel::Shape &end, in CreateStridedSliceCNode()
39 const parallel::Shape &strides, const AnfNodePtr &node) { in CreateStridedSliceCNode()
40 auto slice_op = parallel::CreateStridedSliceOp(0, begin, end, strides); in CreateStridedSliceCNode()
41 auto slice_input = parallel::CreateInput(slice_op, node, parallel::STRIDEDSLICE); in CreateStridedSliceCNode()
42 auto func_graph = node->func_graph(); in CreateStridedSliceCNode()
[all …]
comm_op_reuse_tag.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
24 #include "frontend/parallel/ops_info/ops_utils.h"
25 #include "frontend/parallel/device_manager.h"
27 #include "frontend/parallel/step_parallel_utils.h"
62 if (parallel::g_device_manager == nullptr) { in AddCommOpReuseTag()
63 MS_LOG(INFO) << "parallel::g_device_manager is not initialized."; in AddCommOpReuseTag()
68 if (!parallel::IsAutoParallelCareGraph(graph)) { in AddCommOpReuseTag()
71 auto manager = graph->manager(); in AddCommOpReuseTag()
73 const auto &all_nodes = manager->all_nodes(); in AddCommOpReuseTag()
81 …if (comm_prim->HasAttr(parallel::FUSION) && GetValue<int64_t>(comm_prim->GetAttr(parallel::FUSION)… in AddCommOpReuseTag()
[all …]
flash_sp.cc
8 * http://www.apache.org/licenses/LICENSE-2.0
46 #include "frontend/parallel/ops_info/ops_utils.h"
47 #include "frontend/parallel/ops_info/operator_info.h"
48 #include "frontend/parallel/tensor_layout/tensor_info.h"
49 #include "frontend/parallel/device_matrix.h"
52 #include "mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.h"
55 #include "frontend/parallel/step_parallel_utils.h"
56 #include "mindspore/ccsrc/frontend/parallel/ops_info/flash_attention_score_info.h"
58 #include "frontend/parallel/graph_util/graph_info.h"
62 namespace parallel { namespace
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/pipeline/jit/ps/
pipeline_split.cc
2 * Copyright 2020-2022 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
30 #include "frontend/parallel/pipeline_transformer/pipeline_transformer.h"
31 #include "frontend/parallel/pipeline_transformer/pipeline_interleave.h"
32 #include "frontend/parallel/pipeline_transformer/fold_pipeline_transformer.h"
33 #include "frontend/parallel/dynamic_shape/dynamic_shape.h"
34 #include "frontend/parallel/step_parallel.h"
35 #include "frontend/parallel/step_parallel_utils.h"
36 #include "frontend/parallel/graph_util/pipeline_split_utils.h"
37 #include "frontend/parallel/parameter_manager.h"
[all …]
pass.cc
2 * Copyright 2019-2024 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
41 #include "frontend/parallel/dynamic_shape/dynamic_shape.h"
42 #include "frontend/parallel/step_parallel.h"
43 #include "frontend/parallel/step_auto_parallel.h"
44 #include "frontend/parallel/graph_util/pipeline_split_utils.h"
45 #include "frontend/parallel/pipeline_transformer/pipeline_scheduler.h"
46 #include "frontend/parallel/pipeline_transformer/pipeline_interleave.h"
47 #include "frontend/parallel/pipeline_transformer/gpipe_interleave_scheduler.h"
48 #include "frontend/parallel/pass/merge_comm.h"
[all …]
/third_party/mindspore/mindspore-src/source/tests/ut/cpp/python_input/gtest_input/pipeline/parse/
parallel_if.py
7 # http://www.apache.org/licenses/LICENSE-2.0
33 # pylint: disable=unused-variable
37 Feature: Parallel if transformation
48 x = x - y
60 return x - y
73 Feature: Parallel if transformation
84 x = x - y
98 return x - y
128 Feature: Parallel if transformation
140 x = x - y
[all …]
/third_party/curl/docs/cmdline-opts/
parallel-immediate.md
1 ---
3 SPDX-License-Identifier: curl
4 Long: parallel-immediate
5 Help: Do not wait for multiplexing (with --parallel)
10 See-also:
11 - parallel
12 - parallel-max
14 - --parallel-immediate -Z $URL -o file1 $URL -o file2
15 ---
17 # `--parallel-immediate`
[all …]
parallel-max.md
1 ---
3 SPDX-License-Identifier: curl
4 Long: parallel-max
6 Help: Maximum concurrency for parallel transfers
10 See-also:
11 - parallel
13 - --parallel-max 100 -Z $URL ftp://example.com/
14 ---
16 # `--parallel-max`
18 When asked to do parallel transfers, using --parallel, this option controls
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Frontend/OpenMP/
OMPKinds.def
1 //===--- OMPKinds.def - OpenMP directives, clauses, rt-calls -*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
27 __OMP_DIRECTIVE(parallel)
50 __OMP_DIRECTIVE_EXT(target_parallel, "target parallel")
51 __OMP_DIRECTIVE_EXT(target_parallel_for, "target parallel for")
53 __OMP_DIRECTIVE_EXT(parallel_for, "parallel for")
54 __OMP_DIRECTIVE_EXT(parallel_for_simd, "parallel for simd")
55 __OMP_DIRECTIVE_EXT(parallel_master, "parallel master")
[all …]
/third_party/mindspore/mindspore-src/source/tests/ut/python/parallel/
test_kldiv_loss.py
7 # http://www.apache.org/licenses/LICENSE-2.0
21 from parallel.utils.utils import ParallelValidator, compile_net
44 Features: test KLDivLoss auto parallel
45 Description: auto parallel, reduction is 'mean'
58 Features: test KLDivLoss auto parallel
59 Description: auto parallel, reduction is 'none'
72 Features: test KLDivLoss auto parallel
73 Description: auto parallel, reduction is 'sum'
86 Features: test KLDivLoss data parallel
87 Description: data parallel, reduction is 'mean'
[all …]
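These hits only surface the test docstrings. For orientation, here is a minimal sketch of the pattern such a KLDivLoss parallel test follows, built around the `ParallelValidator` and `compile_net` helpers visible in the import at snippet line 21; the network body, shapes, strategy, and helper signatures are illustrative assumptions, not the original test code:

```python
# Illustrative sketch only: shapes, strategy, and helper signatures are assumed.
import numpy as np
import mindspore as ms
from mindspore import nn, ops, Tensor
from parallel.utils.utils import ParallelValidator, compile_net  # repo test helpers

class KLDivLossNet(nn.Cell):
    def __init__(self, reduction, strategy=None):
        super().__init__()
        self.loss = ops.KLDivLoss(reduction=reduction)
        if strategy is not None:
            self.loss.shard(strategy)  # shard both inputs across devices

    def construct(self, x, target):
        return self.loss(x, target)

ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel",
                             device_num=8, global_rank=0)
net = KLDivLossNet("mean", strategy=((4, 2), (4, 2)))
x = Tensor(np.random.rand(16, 16), ms.float32)
target = Tensor(np.random.rand(16, 16), ms.float32)
phase = compile_net(net, x, target)        # assumed helper signature
validator = ParallelValidator(net, phase)  # inspects the sharded graph
```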
test_lerp.py
9 from parallel.utils.utils import compile_net, ParallelValidator
55 Feature: test Lerp auto parallel
56 Description: auto parallel when 'weight' is tensor
67 Feature: test Lerp auto parallel
68 Description: auto parallel when 'weight' is float
79 Feature: test Lerp model parallel
80 Description: model parallel when 'weight' is tensor
91 Feature: test Lerp model parallel
92 Description: model parallel when 'weight' is float
103 Feature: test Lerp model parallel with repeated calculation
[all …]
test_lin_space_ext.py
7 # http://www.apache.org/licenses/LICENSE-2.0
23 from parallel.utils.utils import ParallelValidator, compile_net
45 Feature: test LinSpaceExt data parallel
46 Description: data parallel
56 assert validator.check_node_inputs('LinSpaceExt-0', ['ScalarAdd-0', 'ScalarAdd-1', 1, 43])
61 Feature: test LinSpaceExt parallel with dynamic shape
73 assert validator.check_node_inputs('LinSpaceExt-0',
74 … ['TensorToScalar-0', 'TensorToScalar-1', 'TensorToScalar-2', 43])
79 Feature: test LinSpaceExt parallel
80 Description: parallel with repeated_cal
[all …]
test_batch_matmul.py
7 # http://www.apache.org/licenses/LICENSE-2.0
58 Feature: distribute operator batch_matmul in auto parallel.
59 Description: mul-batch_matmul net with data parallel strategy in semi auto parallel.
71 Feature: distribute operator batch_matmul in auto parallel.
72 Description: mul-batch_matmul net with model parallel strategy in semi auto parallel.
84 Feature: distribute operator batch_matmul in auto parallel.
85 Description: mul-batch_matmul net with mixed strategy in semi auto parallel.
97 Feature: distribute operator batch_matmul in auto parallel.
98 Description: mul-batch_matmul net in auto parallel.
109 Feature: distribute operator batch_matmul in auto parallel.
[all …]
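For orientation, the "mul-batch_matmul net" these docstrings describe is, in broad strokes, a cell that chains `ops.Mul` into `ops.BatchMatMul`, each with an explicit shard strategy under semi-auto parallel. A minimal sketch with assumed shapes, strategies, and device count (not the original test code):

```python
# Sketch of a mul-batch_matmul net; strategies and device_num are assumptions.
import mindspore as ms
from mindspore import nn, ops

class MulBatchMatMulNet(nn.Cell):
    def __init__(self, mul_strategy, bmm_strategy):
        super().__init__()
        self.mul = ops.Mul().shard(mul_strategy)
        self.batch_matmul = ops.BatchMatMul().shard(bmm_strategy)

    def construct(self, x, y, z):
        out = self.mul(x, y)              # element-wise, e.g. (8, 16, 32)
        return self.batch_matmul(out, z)  # (8, 16, 32) @ (8, 32, 64)

ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel",
                             device_num=8, global_rank=0)
# Data-parallel strategy: split only the leading batch dimension.
data_parallel = MulBatchMatMulNet(((8, 1, 1), (8, 1, 1)),
                                  ((8, 1, 1), (8, 1, 1)))
# Model-parallel strategy: split the contracting dimension instead,
# which makes the framework insert an AllReduce after the matmul.
model_parallel = MulBatchMatMulNet(((1, 1, 8), (1, 1, 8)),
                                   ((1, 1, 8), (1, 8, 1)))
```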
test_arithmetic.py
7 # http://www.apache.org/licenses/LICENSE-2.0
73 Feature: distribute operator sub in auto parallel.
74 Description: matmul-sub net with strategy in semi auto parallel.
103 Feature: distribute operator sub in auto parallel.
104 Description: matmul-sub net with strategy in semi auto parallel.
131 Feature: distribute operator sub in auto parallel.
132 Description: matmul-sub net with strategy in semi auto parallel.
160 Feature: distribute operator sub in auto parallel.
161 Description: matmul-add net with strategy in semi auto parallel.
190 Feature: distribute operator sub in auto parallel.
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/hal/profiler/
parallel_strategy_profiling.cc
2 * Copyright 2021-2022 Huawei Technologies Co., Ltd
8 * http://www.apache.org/licenses/LICENSE-2.0
47 if (!ascend_profiler->IsInitialized() || !ascend_profiler->GetParallelStrategyEnableFlag()) { in IsProfilingParallelStrategyEnabled()
48 MS_LOG(INFO) << "Profiling parallel strategy is disabled."; in IsProfilingParallelStrategyEnabled()
53 if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) { in IsProfilingParallelStrategyEnabled()
54 MS_LOG(INFO) << "Current is ps server or ps scheduler, profiling parallel " in IsProfilingParallelStrategyEnabled()
60 std::string parallel_mode = parallel::ParallelContext::GetInstance()->parallel_mode(); in IsProfilingParallelStrategyEnabled()
61 …if ((parallel_mode == parallel::kAutoParallel) || (parallel_mode == parallel::kSemiAutoParallel) || in IsProfilingParallelStrategyEnabled()
62 (parallel_mode == parallel::kDataParallel)) { in IsProfilingParallelStrategyEnabled()
66 …MS_LOG(INFO) << "Profiling parallel strategy is disabled, current parallel mode is " << parallel_m… in IsProfilingParallelStrategyEnabled()
[all …]
/third_party/cups-filters/backend/
parallel.c
2 * Parallel port backend for OpenPrinting CUPS Filters.
4 * Copyright 2007-2011 by Apple Inc.
5 * Copyright 1997-2007 by Easy Software Products, all rights reserved.
14 * main() - Send a file to the specified parallel port.
15 * drain_output() - Drain pending print data to the device.
16 * list_devices() - List all parallel devices.
17 * run_loop() - Read and write print and back-channel data.
18 * side_cb() - Handle side-channel requests...
25 #include "backend-private.h"
44 * 'main()' - Send a file to the specified parallel port.
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/parallel/
_auto_parallel_context.py
1 # Copyright 2020-2023 Huawei Technologies Co., Ltd
7 # http://www.apache.org/licenses/LICENSE-2.0
15 """Context of auto parallel"""
22 from mindspore.parallel._dp_allreduce_fusion import _set_fusion_strategy_by_idx, _set_fusion_strate…
23 from mindspore.parallel._ps_context import _is_role_pserver
35 The key of the Parallel fusion method configuration.
61 The key of the Parallel Optimizer. There are three
115 Set device num for auto parallel.
138 Set fusion method for auto parallel.
173 Set fusion threshold (MB) for auto parallel.
[all …]
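`_auto_parallel_context.py` is the private backing store for these settings; user code normally reaches them through the public `mindspore.set_auto_parallel_context` interface. A small usage sketch (the parameter values are arbitrary examples):

```python
# Typical public-API usage backed by _auto_parallel_context.py;
# the values below are arbitrary example settings.
import mindspore as ms

ms.set_auto_parallel_context(
    parallel_mode="semi_auto_parallel",  # or "auto_parallel" / "data_parallel"
    device_num=8,                        # "Set device num for auto parallel"
    enable_parallel_optimizer=True,      # the parallel-optimizer switch
)
print(ms.get_auto_parallel_context("device_num"))  # -> 8
ms.reset_auto_parallel_context()                   # restore defaults
```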
/third_party/mindspore/mindspore-src/source/tests/st/control/
test_parallel_if.py
7 # http://www.apache.org/licenses/LICENSE-2.0
23 Feature: Parallel if transformation.
24 Description: return in while loop requires that the after-if func graph should not
40 bias = Tensor([-5], mstype.int32)
41 expect = Tensor([-5], mstype.int32)
48 Feature: Parallel if transformation.
49 Description: return in inner while loop requires that the after-if func graph should not
63 x = x - 1
69 bias = Tensor([-5], mstype.int32)
70 expect = Tensor([-5], mstype.int32)
[all …]
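The docstrings above describe control-flow cases that exercise the parallel-if transformation during graph compilation. A minimal sketch of the "return in while loop" shape being tested, reconstructed from the visible fragments (`x = x - 1`, `bias = Tensor([-5], mstype.int32)`, `expect = Tensor([-5], mstype.int32)`); the exact network and inputs are assumptions:

```python
# Reconstructed shape of a "return-in-while-loop" case; details are assumed.
import mindspore as ms
from mindspore import nn, Tensor
from mindspore.common import dtype as mstype

ms.set_context(mode=ms.GRAPH_MODE)  # the transformation applies at graph compile time

class WhileReturnNet(nn.Cell):
    def construct(self, x, y, bias):
        while x > y:
            if x > bias:
                return x - y  # early return inside the loop body
            x = x - 1
        return x - y

net = WhileReturnNet()
x = Tensor([2], mstype.int32)
y = Tensor([7], mstype.int32)
bias = Tensor([-5], mstype.int32)
out = net(x, y, bias)  # loop body is skipped, so out == Tensor([-5])
```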
