
Searched full:parallel (Results 1 – 25 of 3427) sorted by relevance


/third_party/mindspore/mindspore-src/source/tests/ut/cpp/pipeline/parse/
parallel_if.cc 128 // Feature: Parallel if transformation
129 // Description: Check parallel if transformation for test code with single if/else.
133 // Feature: Parallel if transformation
134 // Description: Check parallel if transformation for test code with if-by-if.
138 // Feature: Parallel if transformation
139 // Description: Check parallel if transformation for test code with if-in-if.
143 // Feature: Parallel if transformation
144 // Description: Check parallel if transformation for test code with if-elif-else.
149 // Feature: Parallel if transformation
150 // Description: Check parallel if transformation for if/else(while return).
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/parallel/pass/
label_micro_interleaved_index.cc 17 #include "frontend/parallel/pass/label_micro_interleaved_index.h"
27 #include "frontend/parallel/step_parallel.h"
30 namespace parallel { namespace
70 bool is_pipeline = (pipeline_micro >= 0 && input_cnode->HasPrimalAttr(parallel::MICRO)); in SpreadMicroInterleavedIndexForForwardCommNodes()
71 …if (is_pipeline && GetValue<int64_t>(input_cnode->GetPrimalAttr(parallel::MICRO)) != pipeline_micr… in SpreadMicroInterleavedIndexForForwardCommNodes()
77 if (pipeline_micro >= 0 && !input_cnode->HasPrimalAttr(parallel::MICRO)) { in SpreadMicroInterleavedIndexForForwardCommNodes()
81 …input_cnode->AddAttr(parallel::MICRO_INTERLEAVED_INDEX, MakeValue<size_t>(micro_interleaved_index)… in SpreadMicroInterleavedIndexForForwardCommNodes()
82 …input_cnode->AddAttr(parallel::MICRO_INTERLEAVED_FORWARD_COMM_ORDER, MakeValue<size_t>(forward_ord… in SpreadMicroInterleavedIndexForForwardCommNodes()
114 if (!forward_node->HasAttr(parallel::MICRO_INTERLEAVED_INDEX) || in LabelMicroInterleavedIndexForBackwardCommNodes()
115 !forward_node->HasAttr(parallel::MICRO_INTERLEAVED_FORWARD_COMM_ORDER)) { in LabelMicroInterleavedIndexForBackwardCommNodes()
[all …]
label_fine_grained_interleaved_index.cc 17 #include "frontend/parallel/pass/label_fine_grained_interleaved_index.h"
27 #include "frontend/parallel/step_parallel.h"
28 #include "frontend/parallel/step_parallel_utils.h"
31 namespace parallel { namespace
80 …pre_cnode->AddAttr(parallel::MICRO_INTERLEAVED_INDEX, MakeValue<size_t>(fine_grained_interleaved_i… in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
81 pre_cnode->AddPrimalAttr(parallel::FINE_GRAINED_INTERLEAVED_BLOCK, in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
95 (parallel::IsSomePrimitiveList(pre_cnode, {ALL_GATHER, ALL_REDUCE, REDUCE_SCATTER}))) { in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
96 pre_cnode->AddPrimalAttr(parallel::FINE_GRAINED_INTERLEAVED_BLOCK, in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
98 …pre_cnode->AddAttr(parallel::MICRO_INTERLEAVED_INDEX, MakeValue<size_t>(fine_grained_interleaved_i… in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
99 …pre_cnode->AddAttr(parallel::MICRO_INTERLEAVED_FORWARD_COMM_ORDER, MakeValue<size_t>(forward_order… in SpreadFineGrainedInterleavedIndexForForwardCommNodes()
[all …]
overlap_opt_shard_in_pipeline.cc 17 #include "frontend/parallel/pass/overlap_opt_shard_in_pipeline.h"
26 #include "frontend/parallel/ops_info/ops_utils.h"
27 #include "frontend/parallel/device_manager.h"
29 #include "frontend/parallel/step_parallel_utils.h"
34 namespace parallel { namespace
43 if (allgather_instance_name.find(parallel::PARALLEL_OPTIMIZER) == std::string::npos) { in is_allgather_comm_ops()
58 auto micro = GetValue<int64_t>(recv_node->GetPrimalAttr(parallel::MICRO)); in is_first_receive()
59 if (micro != 0 || recv_node->HasPrimalAttr(parallel::PIPELINE_PARAM)) { in is_first_receive()
75 if (parallel::g_device_manager == nullptr) { in OverlapOptShardInPipeline()
76 MS_LOG(INFO) << "parallel::g_device_manager is not initialized."; in OverlapOptShardInPipeline()
[all …]
slice_activation_in_cell_share_recompute.cc 17 #include "frontend/parallel/pass/slice_activation_in_cell_share_recompute.h"
21 #include "frontend/parallel/step_parallel.h"
22 #include "frontend/parallel/step_parallel_utils.h"
23 #include "frontend/parallel/graph_util/graph_utils.h"
24 #include "frontend/parallel/tensor_layout/construct_operator.h"
30 namespace parallel { namespace
32 CNodePtr CreateStridedSliceCNode(const parallel::Shape &begin, const parallel::Shape &end, in CreateStridedSliceCNode()
33 const parallel::Shape &strides, const AnfNodePtr &node) { in CreateStridedSliceCNode()
34 auto slice_op = parallel::CreateStridedSliceOp(0, begin, end, strides); in CreateStridedSliceCNode()
35 auto slice_input = parallel::CreateInput(slice_op, node, parallel::STRIDEDSLICE); in CreateStridedSliceCNode()
[all …]
full_micro_interleaved_order_control.cc 17 #include "frontend/parallel/pass/full_micro_interleaved_order_control.h"
28 #include "frontend/parallel/step_parallel.h"
31 namespace parallel { namespace
53 if (!prim1->HasAttr(parallel::GROUP) || !prim2->HasAttr(parallel::GROUP)) { in CheckCommNodeEqual()
56 auto group1 = GetValue<std::string>(prim1->GetAttr(parallel::GROUP)); in CheckCommNodeEqual()
57 auto group2 = GetValue<std::string>(prim2->GetAttr(parallel::GROUP)); in CheckCommNodeEqual()
80 …if (!common::AnfAlgo::IsCommunicationOp(cnode) || !cnode->HasAttr(parallel::MICRO_INTERLEAVED_FORW… in ExtractInterLeavedCommNode()
81 !cnode->HasAttr(parallel::MICRO_INTERLEAVED_INDEX) || cnode->HasAttr(kAttrDuplicated)) { in ExtractInterLeavedCommNode()
89 if (pipeline_micro >= 0 && cnode->HasPrimalAttr(parallel::MICRO) && in ExtractInterLeavedCommNode()
90 GetValue<int64_t>(cnode->GetPrimalAttr(parallel::MICRO)) != pipeline_micro) { in ExtractInterLeavedCommNode()
[all …]
/third_party/mindspore/mindspore-src/source/tests/ut/python/parallel/
test_mul_softmax_net.py 63 Feature: distribute operator softmax in auto parallel.
64 Description: data parallel softmax net in auto parallel.
78 Feature: distribute operator softmax in auto parallel.
79 Description: data parallel and half repeat softmax net in auto parallel.
93 Feature: distribute operator softmax in auto parallel.
94 Description: model parallel softmax net in auto parallel.
108 Feature: distribute operator softmax in auto parallel.
109 Description: model parallel with repeated softmax net in auto parallel.
123 Feature: distribute operator softmax in auto parallel.
124 Description: model parallel with half repeated softmax net in auto parallel.
[all …]
test_unsortedsegmentsum.py 81 Feature: distribute operator unsorted_segment_sum in auto parallel.
82 …Description: unsorted_segment_sum net with model parallel strategy in semi auto parallel, slice 1d.
96 Feature: distribute operator unsorted_segment_sum in auto parallel.
97 Description: unsorted_segment_sum net with no slice strategy in semi auto parallel, slice 1d.
111 Feature: distribute operator unsorted_segment_sum in auto parallel.
112 …Description: unsorted_segment_sum net with model parallel strategy in semi auto parallel, slice 2d.
126 Feature: distribute operator unsorted_segment_sum in auto parallel.
127 …Description: unsorted_segment_sum net with model parallel strategy in semi auto parallel, slice 3d.
141 Feature: distribute operator unsorted_segment_sum in auto parallel.
142 … Description: unsorted_segment_sum net with strategy in semi auto parallel, slice different inputs.
[all …]
test_lerp.py 9 from parallel.utils.utils import compile_net, ParallelValidator
55 Feature: test Lerp auto parallel
56 Description: auto parallel when 'weight' is tensor
67 Feature: test Lerp auto parallel
68 Description: auto parallel when 'weight' is float
79 Feature: test Lerp model parallel
80 Description: model parallel when 'weight' is tensor
91 Feature: test Lerp model parallel
92 Description: model parallel when 'weight' is float
103 Feature: test Lerp model parallel with repeated calculation
[all …]
test_sparse_gather_v2.py 82 Feature: distribute operator SparseGatherV2 in auto parallel.
83 Description: gather net with strategy in semi auto parallel, gather axis is 1.
95 Feature: distribute operator SparseGatherV2 in auto parallel.
96 Description: gather net with strategy in semi auto parallel, gather axis is 1.
107 Feature: distribute operator SparseGatherV2 in auto parallel.
108 Description: gather net with strategy in semi auto parallel, gather axis is 1.
119 Feature: distribute operator SparseGatherV2 in auto parallel.
120 Description: gather net with strategy in semi auto parallel, gather axis is 1.
131 Feature: distribute operator SparseGatherV2 in auto parallel.
132 Description: gather net with strategy in semi auto parallel, gather axis is 0.
[all …]
test_kldiv_loss.py 21 from parallel.utils.utils import ParallelValidator, compile_net
44 Features: test KLDivLoss auto parallel
45 Description: auto parallel, reduction is 'mean'
58 Features: test KLDivLoss auto parallel
59 Description: auto parallel, reduction is 'none'
72 Features: test KLDivLoss auto parallel
73 Description: auto parallel, reduction is 'sum'
86 Features: test KLDivLoss data parallel
87 Description: data parallel, reduction is 'mean'
102 Features: test KLDivLoss data parallel
[all …]
test_gather_v2.py 25 from parallel.utils.utils import ParallelValidator
97 Feature: distribute operator gather in auto parallel.
98 Description: gather net with strategy in semi auto parallel, gather axis is 0.
111 Feature: distribute operator gather in auto parallel.
112 Description: gather net with strategy in semi auto parallel, gather axis is 0.
125 Feature: distribute operator gather in auto parallel.
126 Description: gather net with strategy in semi auto parallel, gather axis is 0.
139 Feature: distribute operator gather in auto parallel.
140 Description: gather net with strategy in semi auto parallel, gather axis is 1.
153 Feature: distribute operator gather in auto parallel.
[all …]
test_batch_matmul.py 58 Feature: distribute operator batch_matmul in auto parallel.
59 Description: mul-batch_matmul net with data parallel strategy in semi auto parallel.
71 Feature: distribute operator batch_matmul in auto parallel.
72 Description: mul-batch_matmul net with model parallel strategy in semi auto parallel.
84 Feature: distribute operator batch_matmul in auto parallel.
85 Description: mul-batch_matmul net with mixed strategy in semi auto parallel.
97 Feature: distribute operator batch_matmul in auto parallel.
98 Description: mul-batch_matmul net in auto parallel.
109 Feature: distribute operator batch_matmul in auto parallel.
110 Description: mul-batch_matmul net with repeated strategy in semi auto parallel.
[all …]
test_softmax_gather_net.py 83 Feature: distribute operator gather in auto parallel.
84 Description: gather and softmax net with strategy in semi auto parallel, gather axis is 0.
98 Feature: distribute operator gather in auto parallel.
99 Description: gather and softmax net with strategy in semi auto parallel, gather axis is 0.
113 Feature: distribute operator gather in auto parallel.
114 Description: gather and softmax net with strategy in semi auto parallel, gather axis is 0.
128 Feature: distribute operator gather in auto parallel.
129 Description: gather and softmax net with strategy in semi auto parallel, gather axis is 1.
143 Feature: distribute operator gather in auto parallel.
144 Description: gather net with strategy in semi auto parallel, gather axis is 1.
[all …]
test_arithmetic.py 73 Feature: distribute operator sub in auto parallel.
74 Description: matmul-sub net with strategy in semi auto parallel.
103 Feature: distribute operator sub in auto parallel.
104 Description: matmul-sub net with strategy in semi auto parallel.
131 Feature: distribute operator sub in auto parallel.
132 Description: matmul-sub net with strategy in semi auto parallel.
160 Feature: distribute operator sub in auto parallel.
161 Description: matmul-add net with strategy in semi auto parallel.
190 Feature: distribute operator sub in auto parallel.
191 Description: matmul-add net with strategy in semi auto parallel.
[all …]
test_stridedslice.py 24 from parallel.utils.utils import ParallelValidator
285 Feature: distribute operator stridedslice in auto parallel mode.
286 Description: test stridedslice with strides no 1 split in semi auto parallel.
299 Feature: distribute operator stridedslice in auto parallel mode.
300 Description: test stridedslice with begin size is smaller in semi auto parallel.
312 Feature: distribute operator stridedslice in auto parallel mode.
313 Description: test stridedslice of parameter in semi auto parallel.
325 Feature: distribute operator stridedslice in auto parallel mode.
326 Description: test stridedslice with begin mask no 0 split in semi auto parallel.
338 Feature: distribute operator stridedslice in auto parallel mode.
[all …]
test_split_ext.py 82 Feature: test SplitWithSize auto parallel
83 Description: auto parallel
93 Feature: test SplitWithSize model parallel
94 Description: model parallel
104 Feature: test SplitWithSize parallel with invalid strategy
105 Description: model parallel
116 Feature: test SplitWithSize parallel skip_redistribution
117 Description: model parallel
130 Feature: test SplitTensor auto parallel
131 Description: auto parallel
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/optimizer/
slice_activation_in_recompute.cc 28 #include "frontend/parallel/tensor_layout/construct_operator.h"
29 #include "frontend/parallel/graph_util/graph_utils.h"
30 #include "frontend/parallel/step_parallel.h"
38 CNodePtr CreateStridedSliceCNode(const parallel::Shape &begin, const parallel::Shape &end, in CreateStridedSliceCNode()
39 const parallel::Shape &strides, const AnfNodePtr &node) { in CreateStridedSliceCNode()
40 auto slice_op = parallel::CreateStridedSliceOp(0, begin, end, strides); in CreateStridedSliceCNode()
41 auto slice_input = parallel::CreateInput(slice_op, node, parallel::STRIDEDSLICE); in CreateStridedSliceCNode()
48 auto op = parallel::CreateAllGatherOp(group); in CreateAllGatherCNode()
49 auto allgather_input = parallel::CreateInput(op, node, "recompute_slice_allgather"); in CreateAllGatherCNode()
55 std::vector<parallel::Group> InferRepeatedRankList(const CNodePtr &cnode) { in InferRepeatedRankList()
[all …]
comm_op_reuse_tag.cc 24 #include "frontend/parallel/ops_info/ops_utils.h"
25 #include "frontend/parallel/device_manager.h"
27 #include "frontend/parallel/step_parallel_utils.h"
62 if (parallel::g_device_manager == nullptr) { in AddCommOpReuseTag()
63 MS_LOG(INFO) << "parallel::g_device_manager is not initialized."; in AddCommOpReuseTag()
68 if (!parallel::IsAutoParallelCareGraph(graph)) { in AddCommOpReuseTag()
81 …if (comm_prim->HasAttr(parallel::FUSION) && GetValue<int64_t>(comm_prim->GetAttr(parallel::FUSION)… in AddCommOpReuseTag()
84 (void)comm_prim->AddAttr(parallel::COMM_REUSE, MakeValue(true)); in AddCommOpReuseTag()
87 if (comm_prim->HasAttr(parallel::GROUP)) { in AddCommOpReuseTag()
88 group_name = GetValue<std::string>(comm_prim->GetAttr(parallel::GROUP)); in AddCommOpReuseTag()
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/pipeline/jit/ps/
pipeline_split.cc 30 #include "frontend/parallel/pipeline_transformer/pipeline_transformer.h"
31 #include "frontend/parallel/pipeline_transformer/pipeline_interleave.h"
32 #include "frontend/parallel/pipeline_transformer/fold_pipeline_transformer.h"
33 #include "frontend/parallel/dynamic_shape/dynamic_shape.h"
34 #include "frontend/parallel/step_parallel.h"
35 #include "frontend/parallel/step_parallel_utils.h"
36 #include "frontend/parallel/graph_util/pipeline_split_utils.h"
37 #include "frontend/parallel/parameter_manager.h"
86 auto virtual_dataset_node = mindspore::parallel::CreateCNodeByInputsAndAttr( in CreateVirtualDataset()
87 func_graph, mindspore::parallel::VIRTUAL_DATA_SET, mindspore::parallel::VIRTUAL_DATA_SET, in CreateVirtualDataset()
[all …]
pass.cc 41 #include "frontend/parallel/dynamic_shape/dynamic_shape.h"
42 #include "frontend/parallel/step_parallel.h"
43 #include "frontend/parallel/step_auto_parallel.h"
44 #include "frontend/parallel/graph_util/pipeline_split_utils.h"
45 #include "frontend/parallel/pipeline_transformer/pipeline_scheduler.h"
46 #include "frontend/parallel/pipeline_transformer/pipeline_interleave.h"
47 #include "frontend/parallel/pipeline_transformer/gpipe_interleave_scheduler.h"
48 #include "frontend/parallel/pass/merge_comm.h"
49 #include "frontend/parallel/cache_embedding/cache_embedding.h"
50 #include "frontend/parallel/cache_embedding/ps_embedding_cache_inserter.h"
[all …]
/third_party/mindspore/mindspore-src/source/tests/ut/cpp/python_input/gtest_input/pipeline/parse/
parallel_if.py 37 Feature: Parallel if transformation
73 Feature: Parallel if transformation
128 Feature: Parallel if transformation
180 Feature: Parallel if transformation
232 # Location of additional if/else: if/else parallel with loop,
233 # if/else parallel with if/else, if/else inside if.
236 Feature: Parallel if transformation.
255 Feature: Parallel if transformation.
278 Feature: Parallel if transformation.
300 Feature: Parallel if transformation.
[all …]
/third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/parallel/_transformer/
op_parallel_config.py 16 Parallel Config for the Parallel Training.
25 from mindspore.parallel._utils import _get_parallel_mode
46 …Config for MoE structure, which includes setting data parallel, model parallel and expert parallel.
49 data_parallel (int): The data parallel way. Default: 1
50 model_parallel (int): The model parallel way. Default: 1
51 expert_parallel (int): The expert parallel way. Default: 1
98 OpParallelConfig for the setting data parallel and model parallel.
101 data_parallel (int): The data parallel way. Default: 1
102 model_parallel (int): The model parallel way. Default: 1
138 PPConfig for the setting data parallel, model parallel
[all …]
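For orientation, below is a minimal sketch of how the OpParallelConfig documented in the entry above might be constructed. The module path is taken from the file location in this result; the keyword-argument constructor is an assumption based on the documented fields (data_parallel and model_parallel, both defaulting to 1), not a verified signature.

# Hedged sketch: OpParallelConfig bundles the data-parallel and model-parallel
# degrees used when building operator-level parallel strategies.
# Keyword arguments are assumed from the docstring shown in the listing above.
from mindspore.parallel._transformer.op_parallel_config import OpParallelConfig

# Example split of 8 devices: 4-way data parallel x 2-way model parallel.
op_conf = OpParallelConfig(data_parallel=4, model_parallel=2)
print(op_conf.data_parallel, op_conf.model_parallel)  # expected: 4 2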
/third_party/curl/docs/cmdline-opts/
parallel-immediate.md 4 Long: parallel-immediate
5 Help: Do not wait for multiplexing (with --parallel)
11 - parallel
12 - parallel-max
14 - --parallel-immediate -Z $URL -o file1 $URL -o file2
17 # `--parallel-immediate`
19 When doing parallel transfers, this option instructs curl that it should
20 rather prefer opening up more connections in parallel at once rather than waiting to see if new transfers can be added as multiplexed streams on another connection.
/third_party/mindspore/mindspore-src/source/mindspore/lite/test/ut/python/
test_server_inference_api.py 23 # ============================ Context.parallel ============================
28 context.parallel.workers_num = "4"
35 context.parallel.workers_num = -4
42 context.parallel.config_info = 1
49 context.parallel.config_info = {1: {"test": "test"}}
56 context.parallel.config_info = {"test": "test"}
63 context.parallel.config_info = {"test": {1: "test"}}
70 context.parallel.config_info = {"test": {"test": 1}}
77 context.parallel.config_path = 1
84 context.parallel.config_path = "test.cfg"
[all …]
