/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/enhancer/

  insert_pad_for_nms_with_mask.cc
    38: const std::vector<size_t> &origin_shape) {    [in InsertPadToGraph(), argument]
    46: AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, pad.get());    [in InsertPadToGraph()]
    68: auto origin_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, input_idx);    [in Process(), local]
    69: if (!(origin_shape.size() == kShapeSize && origin_shape[1] == kShapeValue5)) {    [in Process()]
    72: origin_shape[1] = kShapeValue8;    [in Process()]
    73: auto pad = InsertPadToGraph(func_graph, cur_input, origin_type, origin_shape);    [in Process()]

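From the snippet above, this pass widens the second dimension of the NMSWithMask input from 5 to 8 and records the padded shape on the new node. A minimal standalone sketch of that padding step, using numpy instead of the MindSpore graph APIs (the zero fill and the helper name are illustrative assumptions, not the pass's actual code):

    import numpy as np

    def pad_boxes_to_eight(boxes: np.ndarray) -> np.ndarray:
        """Pad an (N, 5) box tensor with zeros so the last dim becomes 8.

        Mirrors the shape change in insert_pad_for_nms_with_mask.cc, where
        origin_shape[1] is rewritten from kShapeValue5 to kShapeValue8.
        """
        if boxes.ndim != 2 or boxes.shape[1] != 5:
            raise ValueError(f"expected shape (N, 5), got {boxes.shape}")
        padded = np.zeros((boxes.shape[0], 8), dtype=boxes.dtype)
        padded[:, :5] = boxes
        return padded

    print(pad_boxes_to_eight(np.ones((4, 5), dtype=np.float32)).shape)  # (4, 8)
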
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/format_type/

  insert_transpose_for_basiclstm_op.cc
    47: auto origin_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 1);    [in Insert(), local]
    48: auto dst_shape = {origin_shape[1], origin_shape[0]};    [in Insert()]
    65: auto origin_shape = AnfAlgo::GetOutputInferShape(cnode, output_idx);    [in Insert(), local]
    66: if (origin_shape.size() > 1 && output_idx == 0) {    [in Insert()]
    68: auto dst_shape = {origin_shape[0], origin_shape[1]};    [in Insert()]

  insert_cast.cc
    50: const auto origin_shape = AnfAlgo::GetOutputDetailShape(cnode, output_idx);    [in InsertCastForMultipleOutput(), local]
    65: …e_node = AddCastOpNodeToGraph(func_graph, getitem, dev_fmt, device_type, origin_type, origin_shape,    [in InsertCastForMultipleOutput()]
    96: const abstract::BaseShapePtr origin_shape = AnfAlgo::GetOutputDetailShape(cnode, 0);    [in InsertCastForOutput(), local]
    101: …ace_node = AddCastOpNodeToGraph(func_graph, cnode, dev_fmt, device_type, origin_type, origin_shape,    [in InsertCastForOutput()]

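insert_cast.cc above, and the CPU and graph-kernel variants further down, all follow the same idea: when a node's device output type differs from its inferred origin type, a Cast node is spliced in that restores origin_type while keeping origin_shape. A rough, self-contained sketch of that rewrite on a toy node structure (the Node class and helper below are hypothetical, not MindSpore's API):

    from dataclasses import dataclass, field

    @dataclass
    class Node:
        """Toy stand-in for a graph node: an op name, inputs, dtype and shape."""
        op: str
        inputs: list = field(default_factory=list)
        dtype: str = "float32"
        shape: tuple = ()

    def insert_cast_for_output(node: Node, device_type: str) -> Node:
        """If the device executes `node` in a different dtype, append a Cast
        back to the original (inferred) dtype, preserving the original shape."""
        origin_type, origin_shape = node.dtype, node.shape
        if device_type == origin_type:
            return node  # nothing to do
        node.dtype = device_type  # the kernel now produces device_type
        cast = Node(op="Cast", inputs=[node], dtype=origin_type, shape=origin_shape)
        return cast

    out = insert_cast_for_output(Node(op="MatMul", dtype="float32", shape=(16, 16)), "float16")
    print(out.op, out.dtype, out.shape)  # Cast float32 (16, 16)
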
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/

  ascend_helper.cc
    36: bool NeedInsertTransData(const std::vector<size_t> &origin_shape, const std::string &format) {    [in NeedInsertTransData(), argument]
    37: …bool shape_check = origin_shape.size() > 1 || (origin_shape.size() == 1 && origin_shape[0] % kCube…    [in NeedInsertTransData()]
    125: std::vector<size_t> origin_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, index);    [in GetTransInputNodePtr(), local]
    127: if (NeedInsertTransData(origin_shape, dest_format)) {    [in GetTransInputNodePtr()]
    144: std::vector<size_t> origin_shape = AnfAlgo::GetOutputInferShape(node, 0);    [in InsertTransOpForSingleOutput(), local]
    149: if (NeedInsertTransData(origin_shape, output_format)) {    [in InsertTransOpForSingleOutput()]
    176: std::vector<size_t> origin_shape = AnfAlgo::GetOutputInferShape(node, output_idx);    [in InsertTransOpForMultipleOutput(), local]
    177: if (NeedInsertTransData(origin_shape, output_format)) {    [in InsertTransOpForMultipleOutput()]
    327: const abstract::BaseShapePtr &origin_shape, const TypeId &origin_type,    [in AddCastOpNodeToGraph(), argument]
    330: MS_EXCEPTION_IF_NULL(origin_shape);    [in AddCastOpNodeToGraph()]
    [all …]

/third_party/mindspore/mindspore/ccsrc/backend/optimizer/cpu/

  insert_cast_cpu.cc
    37: … const std::vector<size_t> &origin_shape, const TypeId &origin_type) {    [in AddCastOpNodeToGraph(), argument]
    54: AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, cast.get());    [in AddCastOpNodeToGraph()]
    84: const std::vector<size_t> origin_shape = AnfAlgo::GetOutputInferShape(prev_node.first, prev_node.s…    [in InsertCast(), local]
    88: …AddCastOpNodeToGraph(func_graph, cur_input, dev_fmt, origin_type, device_type, origin_shape, devic…    [in InsertCast()]
    112: const std::vector<size_t> origin_shape =    [in InsertCastForGraphOutput(), local]
    115: …stOpNodeToGraph(func_graph, cur_input, dev_fmt, device_type, infer_type, origin_shape, infer_type);    [in InsertCastForGraphOutput()]

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/akg/

  akg_kernel_attrs_process.cc
    56: std::vector<size_t> origin_shape = AnfAlgo::GetOutputInferShape(anf_node, 0);    [in SetAkgAttrsForFive2Four(), local]
    57: if (origin_shape.size() != kShape4dDims) {    [in SetAkgAttrsForFive2Four()]
    58: …ON) << "The dim of origin_shape is not equal to 4, but it's dim is " << origin_shape.size() << ".";    [in SetAkgAttrsForFive2Four()]
    61: …(void)std::transform(origin_shape.begin(), origin_shape.end(), std::back_inserter(shape_transform),    [in SetAkgAttrsForFive2Four()]
    62: [](const int &origin_shape) { return static_cast<int>(origin_shape); });    [in SetAkgAttrsForFive2Four(), argument]

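SetAkgAttrsForFive2Four above insists that origin_shape has exactly 4 dimensions before recording it as an attribute. Five2Four conventionally converts Ascend's 5-D NC1HWC0 layout back to 4-D NCHW; on that assumption, here is a numpy sketch of the layout round trip (C0 = 16 as the cube size is also an assumption, not taken from the listed file):

    import numpy as np

    C0 = 16  # assumed cube size

    def four_to_five(x: np.ndarray) -> np.ndarray:
        """NCHW -> NC1HWC0: pad C up to a multiple of C0, split it, move C0 last."""
        n, c, h, w = x.shape
        c1 = (c + C0 - 1) // C0
        padded = np.zeros((n, c1 * C0, h, w), dtype=x.dtype)
        padded[:, :c] = x
        return padded.reshape(n, c1, C0, h, w).transpose(0, 1, 3, 4, 2)

    def five_to_four(x: np.ndarray, origin_shape: tuple) -> np.ndarray:
        """NC1HWC0 -> NCHW, trimming channel padding using the recorded origin_shape."""
        n, c1, h, w, c0 = x.shape
        nchw = x.transpose(0, 1, 4, 2, 3).reshape(n, c1 * c0, h, w)
        return nchw[:, :origin_shape[1]]

    x = np.random.rand(2, 3, 4, 4).astype(np.float32)
    assert np.array_equal(five_to_four(four_to_five(x), x.shape), x)
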
/third_party/mindspore/mindspore/nn/probability/distribution/

  poisson.py
    266: origin_shape = shape + self.shape(rate)
    267: if origin_shape == ():
    270: sample_shape = origin_shape
    273: if origin_shape == ():

  gumbel.py
    227: origin_shape = shape + self._broadcast_shape
    228: if origin_shape == ():
    231: sample_shape = origin_shape
    235: if origin_shape == ():

  normal.py
    313: origin_shape = shape + batch_shape
    314: if origin_shape == ():
    317: sample_shape = origin_shape
    320: if origin_shape == ():

  exponential.py
    323: origin_shape = shape + self.shape(rate)
    324: if origin_shape == ():
    327: sample_shape = origin_shape
    333: if origin_shape == ():

  geometric.py
    314: origin_shape = shape + self.shape(probs1)
    315: if origin_shape == ():
    318: sample_shape = origin_shape
    324: if origin_shape == ():

  bernoulli.py
    314: origin_shape = shape + self.shape(probs1)
    315: if origin_shape == ():
    318: sample_shape = origin_shape
    324: if origin_shape == ():

  gamma.py
    356: origin_shape = shape + batch_shape
    357: if origin_shape == ():
    360: sample_shape = origin_shape
    363: if origin_shape == ():

  cauchy.py
    354: origin_shape = shape + batch_shape
    355: if origin_shape == ():
    358: sample_shape = origin_shape
    364: if origin_shape == ():

  beta.py
    345: origin_shape = shape + batch_shape
    346: if origin_shape == ():
    349: sample_shape = origin_shape
    355: if origin_shape == ():

  uniform.py
    346: origin_shape = shape + broadcast_shape
    347: if origin_shape == ():
    350: sample_shape = origin_shape
    356: if origin_shape == ():

  logistic.py
    348: origin_shape = shape + batch_shape
    349: if origin_shape == ():
    352: sample_shape = origin_shape
    359: if origin_shape == ():

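All eleven distribution classes above share the same sampling bookkeeping: origin_shape is the requested sample shape concatenated with the broadcast/batch shape, an empty tuple is temporarily promoted to (1,) so the backend sampler always sees a non-empty shape, and the helper axis is squeezed away afterwards. A minimal sketch of that pattern, with numpy standing in for the MindSpore sampling ops:

    import numpy as np

    def sample_normal(shape: tuple, mean: np.ndarray, sd: np.ndarray) -> np.ndarray:
        """Mimic the origin_shape / sample_shape dance seen in normal.py et al."""
        batch_shape = np.broadcast_shapes(mean.shape, sd.shape)
        origin_shape = shape + batch_shape
        # A zero-rank request still needs a concrete shape for the sampler.
        sample_shape = (1,) if origin_shape == () else origin_shape
        value = np.random.standard_normal(sample_shape) * sd + mean
        if origin_shape == ():
            value = np.squeeze(value)  # drop the helper axis again
        return value

    print(sample_normal((), np.float32(0.0), np.float32(1.0)).shape)   # ()
    print(sample_normal((3,), np.zeros(2), np.ones(2)).shape)          # (3, 2)
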
/third_party/mindspore/mindspore/lite/tools/optimizer/common/

  gllo_utils.cc
    66: void TransposeData(const ShapeVector &origin_shape, const ShapeVector &cur_shape, const std::vector…    [in TransposeData(), argument]
    69: MS_ASSERT(origin_shape.size() == cur_shape.size() && cur_shape.size() == perm.size());    [in TransposeData()]
    70: int count = std::accumulate(origin_shape.begin(), origin_shape.end(), 1, std::multiplies<int>());    [in TransposeData()]
    84: for (int j = static_cast<int>(origin_shape.size()) - 1; j >= 0; --j) {    [in TransposeData()]
    85: MS_ASSERT(origin_shape[j] > 0);    [in TransposeData()]
    86: position_map[j] = temp % origin_shape[j];    [in TransposeData()]
    87: temp /= origin_shape[j];    [in TransposeData()]
    100: auto origin_shape = tensor->shape_c();    [in DoTransposeData(), local]
    101: if (origin_shape.size() != kInputSizeFour) {    [in DoTransposeData()]
    102: MS_LOG(ERROR) << "Filter dim-num is not supported, dim-num: " << origin_shape.size();    [in DoTransposeData()]
    [all …]

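TransposeData above rearranges tensor data by walking every flat index, decomposing it into per-dimension coordinates against origin_shape (the temp % origin_shape[j] / temp /= origin_shape[j] loop), and re-encoding those coordinates under the permuted shape. A standalone Python sketch of that index arithmetic, written against plain lists rather than the lite tensor API:

    from functools import reduce

    def transpose_data(data, origin_shape, perm):
        """Return data rearranged so axis i of the result is axis perm[i] of the input."""
        assert len(origin_shape) == len(perm)
        cur_shape = [origin_shape[p] for p in perm]
        count = reduce(lambda a, b: a * b, origin_shape, 1)
        out = [None] * count
        for src in range(count):
            # Decompose the flat source index into coordinates (last axis fastest).
            pos, temp = [0] * len(origin_shape), src
            for j in range(len(origin_shape) - 1, -1, -1):
                pos[j] = temp % origin_shape[j]
                temp //= origin_shape[j]
            # Re-encode the permuted coordinates as a flat destination index.
            dst = 0
            for j in range(len(cur_shape)):
                dst = dst * cur_shape[j] + pos[perm[j]]
            out[dst] = data[src]
        return out

    # A 2x3 matrix transposed to 3x2.
    print(transpose_data([1, 2, 3, 4, 5, 6], [2, 3], [1, 0]))  # [1, 4, 2, 5, 3, 6]
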
/third_party/mindspore/mindspore/ccsrc/runtime/device/ascend/dump/proto/

  op_mapping_info.proto
    34: Shape origin_shape = 10;    [field]
    43: Shape origin_shape = 6;    [field]

/third_party/mindspore/mindspore/ccsrc/frontend/parallel/

  parameter_manager.cc
    582: auto origin_shape = tensor_layout->tensor_shape().array();    [in HandleAdaFactorOpt(), local]
    588: origin_shape.pop_back();    [in HandleAdaFactorOpt()]
    593: …(void)origin_shape.erase(origin_shape.begin() + static_cast<different_type>(SECOND_FROM_END(shape_…    [in HandleAdaFactorOpt()]
    601: if (new_tensor_layout.InitFromVector(dev_mat, tensor_map, origin_shape) != SUCCESS) {    [in HandleAdaFactorOpt()]
    612: …G(INFO) << "Set the slice shape for " << row_col_param_name << ", origin shape is " << origin_shape    [in HandleAdaFactorOpt()]

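HandleAdaFactorOpt above derives the layouts of AdaFactor's factored second-moment states from the parameter's origin_shape: the row statistic drops the last dimension (pop_back), the column statistic drops the second-from-end dimension (the SECOND_FROM_END erase). A small sketch of that shape bookkeeping, under the assumption that the parameter is at least rank 2, as the factored states require:

    def adafactor_state_shapes(origin_shape):
        """Shapes of the factored exp_avg_sq_row / exp_avg_sq_col states."""
        if len(origin_shape) < 2:
            raise ValueError("factored states need a parameter of rank >= 2")
        row_shape = list(origin_shape[:-1])   # pop_back(): drop the last dim
        col_shape = list(origin_shape)
        del col_shape[-2]                     # erase the second-from-end dim
        return tuple(row_shape), tuple(col_shape)

    print(adafactor_state_shapes((1024, 4096)))   # ((1024,), (4096,))
    print(adafactor_state_shapes((8, 64, 128)))   # ((8, 64), (8, 128))
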
/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/cl/

  strassen.cl
    62: const int origin_shape = 2 * ci_co_4;
    63: int index_1 = (gidx + offset.x) * origin_shape + gidy + offset.y;
    64: int index_2 = (gidx + offset.z) * origin_shape + gidy + offset.w;

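In strassen.cl, origin_shape is the row stride (2 * ci_co_4) of the parent matrix, the offsets pick which quadrant a work-item addresses, and index_1/index_2 are plain row-major flattenings. A Python sketch of the same addressing, with hypothetical quadrant offsets standing in for the kernel's offset vector:

    def flat_index(gidx, gidy, offset_row, offset_col, ci_co_4):
        """Row-major index of element (gidx, gidy) inside a quadrant of a
        (2*ci_co_4) x (2*ci_co_4) matrix, as in strassen.cl."""
        origin_shape = 2 * ci_co_4  # row stride of the full matrix
        return (gidx + offset_row) * origin_shape + (gidy + offset_col)

    ci_co_4 = 4
    # Top-left quadrant element (0, 0) vs. bottom-right quadrant element (0, 0).
    print(flat_index(0, 0, 0, 0, ci_co_4))               # 0
    print(flat_index(0, 0, ci_co_4, ci_co_4, ci_co_4))   # 36
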
/third_party/mindspore/mindspore/parallel/nn/

  moe.py
    214: origin_shape = self.shape(expert_mask)
    215: tokens_per_device = origin_shape[1]
    230: cum_sum = self.reshape(cum_sum, (origin_shape[0], origin_shape[2], tokens_per_device))

/third_party/mindspore/mindspore/ops/operations/

  _thor_ops.py
    129: def __init__(self, origin_shape=(-1, -1)):    [argument]
    132: self.origin_shape = origin_shape

/third_party/mindspore/mindspore/ccsrc/backend/optimizer/graph_kernel/

  decrease_compute_precision.cc
    36: … const TypeId &input_type, const TypeId &output_type, const std::vector<size_t> &origin_shape,    [in AddCastCNode(), argument]
    55: AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, cast.get());    [in AddCastCNode()]

/third_party/mindspore/tests/vm_impl/

  nn_ops_vm_impl.py
    197: def vm_impl(dout, origin_shape):    [argument]
    199: …out = vm.avg_pool_grad(dout, origin_shape, self.kernel_size[-2], self.kernel_size[-1], self.stride…

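The vm_impl above feeds dout and origin_shape (the forward input's shape) to vm.avg_pool_grad. The gradient of average pooling spreads each output gradient uniformly over its pooling window; a numpy sketch for the non-overlapping case (stride equal to the kernel size, no padding, an assumption made to keep the example short):

    import numpy as np

    def avg_pool_grad(dout: np.ndarray, origin_shape: tuple, kh: int, kw: int) -> np.ndarray:
        """Spread each pooled-output gradient evenly over its kh x kw window.
        Assumes NCHW layout, stride == kernel, and H/W divisible by the kernel."""
        dx = np.zeros(origin_shape, dtype=dout.dtype)
        n, c, oh, ow = dout.shape
        for i in range(oh):
            for j in range(ow):
                window = dout[:, :, i, j][:, :, None, None] / (kh * kw)
                dx[:, :, i * kh:(i + 1) * kh, j * kw:(j + 1) * kw] += window
        return dx

    dout = np.ones((1, 1, 2, 2), dtype=np.float32)
    print(avg_pool_grad(dout, (1, 1, 4, 4), 2, 2))  # every entry is 0.25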