/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/ |
D | split_gpu_kernel.h |
      66  axis_ = static_cast<int64_t>(GetAttr<int64_t>(kernel_node, "axis"));  in Init()
      67  if (axis_ < -dims || axis_ >= dims) {  in Init()
      70  if (axis_ < 0) {  in Init()
      71  axis_ += dims;  in Init()
      76  axis_ = AxisTransform(origin_data_format, input_format, axis_);  in Init()
      89  if (i > axis_) {  in Init()
      93  if (i == axis_) {  in Init()
      98  axis_step_ = input_shape[axis_] / output_num_;  in Init()
     121  axis_ = 0;  in ResetResource()
     155  if (axis_ < -dims || axis_ >= dims) {  in CheckParam()
     [all …]
|
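The Init() and CheckParam() hits above show the axis-handling pattern that recurs throughout these kernels: read the attribute, reject values outside [-rank, rank), then wrap negatives into [0, rank). A minimal standalone sketch of that pattern (names are illustrative, not the actual MindSpore helpers):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Normalize an axis the way the Init() snippets above do: an axis in
    // [-rank, rank) is accepted, and a negative value is wrapped to rank + axis.
    int64_t NormalizeAxis(int64_t axis, const std::vector<size_t> &shape) {
      const int64_t rank = static_cast<int64_t>(shape.size());
      if (axis < -rank || axis >= rank) {
        throw std::invalid_argument("axis out of range");
      }
      if (axis < 0) {
        axis += rank;  // e.g. axis = -1 on a rank-4 tensor becomes 3
      }
      return axis;
    }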
D | gatherv2_gpu_kernel.h |
      51  … cudaMemcpyAsync(&axis_, axis_device_address, sizeof(int64_t), cudaMemcpyDeviceToHost,  in Launch()
      58  auto input_dim1 = input_shapes_[IntToSize(axis_)];  in Launch()
      90  axis_ = static_cast<int>(GetAttr<int64_t>(kernel_node, "axis"));  in Init()
      91  if (axis_ < -dims || axis_ >= dims) {  in Init()
     106  axis_ = 0;  in ResetResource()
     128  if (axis_ < 0) {  in Reshape()
     129  axis_ = axis_ + SizeToInt(input_shapes_.size());  in Reshape()
     132  for (size_t i = 0; i < std::min(IntToSize(axis_), output_shapes_.size()); i++) {  in Reshape()
     140  for (size_t i = IntToSize(axis_) + indices_shapes_.size(); i < output_shapes_.size(); i++) {  in Reshape()
     153  int64_t axis_;  variable
|
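Line 51 is the runtime-axis variant: GatherV2 reads the axis back from device memory inside Launch(). A sketch of that idiom using only the plain CUDA runtime API (error checking elided; the explicit synchronization point is an assumption, since the surrounding code is not shown):

    #include <cuda_runtime.h>
    #include <cstdint>

    // Copy a scalar axis that lives in device memory back to the host, then
    // synchronize the stream so the value is valid before it is used for
    // host-side shape computation.
    int64_t ReadAxisFromDevice(const int64_t *axis_device, cudaStream_t stream) {
      int64_t axis = 0;
      cudaMemcpyAsync(&axis, axis_device, sizeof(int64_t),
                      cudaMemcpyDeviceToHost, stream);
      cudaStreamSynchronize(stream);  // host must wait before reading `axis`
      return axis;
    }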
D | concatv2_gpu_kernel.h |
      32  : axis_(0),  in ConcatV2GpuFwdKernel()
      80  axis_ = static_cast<int>(GetAttr<int64_t>(kernel_node, "axis"));  in Init()
      81  if (axis_ < -dims || axis_ >= dims) {  in Init()
      85  if (axis_ < 0) {  in Init()
      86  axis_ += dims;  in Init()
      90  axis_ = AxisTransform(origin_data_format, input_format, axis_);  in Init()
     107  len_axis_[current_dim] = SizeToInt(input_shape[axis_]);  in Init()
     118  if (i > axis_) {  in Init()
     122  if (i == axis_) {  in Init()
     143  int axis_;  variable
|
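Split, ConcatV2, and Unpack all pass the normalized axis through AxisTransform() when the origin data format differs from the kernel's layout. The body of AxisTransform is not shown in these hits, so the sketch below is only a plausible illustration of such a remap for the NCHW to NHWC case:

    #include <array>

    // Illustrative only: remap an already-normalized NCHW axis index (0-3)
    // to its position in an NHWC-laid-out tensor. N stays first, C moves
    // last, H and W shift down by one.
    int RemapAxisNchwToNhwc(int axis) {
      constexpr std::array<int, 4> kMap = {0, 3, 1, 2};  // positions of N, C, H, W in NHWC
      return kMap[axis];
    }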
D | gather_grad_gpu_kernel.h |
      30  GatherGradGpuKernel() : axis_(0), is_null_input_(false) {}  in GatherGradGpuKernel()
      72  axis_ = static_cast<int>(GetAttr<int64_t>(kernel_node, "dim"));  in Init()
      73  if (axis_ < -dims || axis_ >= dims) {  in Init()
      77  if (axis_ < 0) {  in Init()
      78  axis_ += dims;  in Init()
     102  for (size_t i = 0; i < IntToSize(axis_); i++) {  in Reshape()
     105  size_t dim_at_axis_index = index_shapes_[IntToSize(axis_)];  in Reshape()
     106  size_t dim_at_axis_output = output_shapes_[IntToSize(axis_)];  in Reshape()
     108  for (size_t i = IntToSize(axis_) + 1; i < output_shapes_.size(); i++) {  in Reshape()
     131  int axis_;  variable
|
D | gather_gpu_kernel.h |
      30  GatherGpuFwdKernel() : axis_(0), is_null_input_(false) {}  in GatherGpuFwdKernel()
      72  axis_ = static_cast<int>(GetAttr<int64_t>(kernel_node, "dim"));  in Init()
      73  if (axis_ < -dims || axis_ >= dims) {  in Init()
      77  if (axis_ < 0) {  in Init()
      78  axis_ += dims;  in Init()
     101  for (size_t i = 0; i < IntToSize(axis_); i++) {  in Reshape()
     104  size_t dim_at_axis_input = input_shapes_[IntToSize(axis_)];  in Reshape()
     105  size_t dim_at_axis_output = output_shapes_[IntToSize(axis_)];  in Reshape()
     107  for (size_t i = IntToSize(axis_) + 1; i < output_shapes_.size(); i++) {  in Reshape()
     130  int axis_;  variable
|
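The Reshape() hits in both Gather kernels compute the same three-way factorization of a shape around the axis: the product of dimensions before it, the dimension at it, and the product after it. Standalone:

    #include <cstddef>
    #include <vector>

    struct AxisFactors {
      size_t outer;    // product of dims before the axis
      size_t at_axis;  // the dimension at the axis itself
      size_t inner;    // product of dims after the axis
    };

    // Mirrors the two loops plus the dim_at_axis_* reads shown above.
    AxisFactors FactorizeAroundAxis(const std::vector<size_t> &shape, size_t axis) {
      AxisFactors f{1, shape[axis], 1};
      for (size_t i = 0; i < axis; ++i) f.outer *= shape[i];
      for (size_t i = axis + 1; i < shape.size(); ++i) f.inner *= shape[i];
      return f;
    }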
D | reverse_v2_gpu_kernel.h |
      61  … cudaMemcpyAsync(axis_device, &axis_[0], workspace_size_list_[2], cudaMemcpyHostToDevice,  in Launch()
      66  axis_.size(), reinterpret_cast<cudaStream_t>(stream_ptr));  in Launch()
     106  axis_ = GetAttr<std::vector<int64_t>>(kernel_node, "axis");  in Init()
     107  if (axis_.size() < 1) {  in Init()
     108  …N) << "For 'ReverseV2GpuKernel', the rank of axis cannot be less than 1, but got " << axis_.size();  in Init()
     110  for (int64_t &dimension : axis_) {  in Init()
     127  axis_.clear();  in ResetResource()
     141  workspace_size_list_.push_back(axis_.size() * sizeof(int64_t));  in InitSizeLists()
     149  std::vector<int64_t> axis_;  variable
|
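ReverseV2 keeps the full axis list on the host, reserves a workspace of axis_.size() * sizeof(int64_t) bytes (line 141), and uploads it in Launch() (line 61). A sketch of that upload with illustrative names:

    #include <cuda_runtime.h>
    #include <cstdint>
    #include <vector>

    // Upload a host-side axis list into a preallocated device workspace.
    // `axis_device` must hold at least axis.size() * sizeof(int64_t) bytes.
    void UploadAxes(const std::vector<int64_t> &axis, int64_t *axis_device,
                    cudaStream_t stream) {
      cudaMemcpyAsync(axis_device, axis.data(), axis.size() * sizeof(int64_t),
                      cudaMemcpyHostToDevice, stream);
    }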
D | sort_gpu_kernel.h |
      92  …opK(outer_size_, inner_size_, intermediate_input_device, static_cast<int32_t>(input_shape_[axis_]),  in Launch()
     153  axis_ = GetAttr<int64_t>(kernel_node, "axis");  in Init()
     155  if (axis_ < 0) {  in Init()
     156  axis_ += input_rank_;  in Init()
     158  if ((size_t)axis_ >= input_rank_) {  in Init()
     159  …RROR) << "For 'SortGpuKernel', axis should be less than the rank of input, but got axis: " << axis_  in Init()
     166  std::swap(perm_[input_rank_ - 1], perm_[axis_]);  in Init()
     169  std::swap(transposed_shape_[input_rank_ - 1], transposed_shape_[axis_]);  in Init()
     171  inner_size_ = input_shape_[axis_];  in Init()
     188  axis_ = 0;  in ResetResource()
     [all …]
|
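Sort reduces the general case to sorting along the innermost dimension: lines 166 and 169 swap the requested axis with the last entry of an identity permutation and of the shape, transpose, sort, and transpose back. The permutation construction in isolation:

    #include <cstddef>
    #include <numeric>
    #include <utility>
    #include <vector>

    // Build the permutation that moves `axis` to the innermost position,
    // as the Init() hits above do with std::swap on perm_.
    std::vector<size_t> AxisToLastPerm(size_t rank, size_t axis) {
      std::vector<size_t> perm(rank);
      std::iota(perm.begin(), perm.end(), 0);  // identity: 0, 1, ..., rank-1
      std::swap(perm[rank - 1], perm[axis]);
      return perm;
    }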
D | unpack_gpu_kernel.h |
      32  …: axis_(0), is_null_input_(false), output_num_(0), input_size_(1), dims_after_axis_(1), outputs_ho…  in UnpackGpuFwdKernel()
      62  axis_ = static_cast<int32_t>(GetAttr<int64_t>(kernel_node, "axis"));  in Init()
      63  if (axis_ < 0) {  in Init()
      65  axis_ += SizeToInt(input_shape.size());  in Init()
      69  axis_ = AxisTransform(origin_data_format, input_format, axis_);  in Init()
      98  if (i > IntToSize(axis_)) {  in Init()
     119  int axis_;  variable
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/ |
D | tbe_kernel_reduce_selecter.cc |
      37  axis_.clear();  in GetShapeInfo()
      52  axis_ = GetReduceAttrAxis(cnode_ptr_);  in GetShapeInfo()
      62  if (!keep_dims_ || axis_.empty()) {  in IsReduceSupport5HD()
      65  …auto reduce_c_axis = std::any_of(axis_.begin(), axis_.end(), [](const size_t &elem) { return (elem…  in IsReduceSupport5HD()
      78  if (!keep_dims_ || axis_.empty()) {  in IsReduceSupportNDC1HWC0()
      81  …auto reduce_c_axis = std::any_of(axis_.begin(), axis_.end(), [](const size_t &elem) { return (elem…  in IsReduceSupportNDC1HWC0()
     104  if (axis_.empty()) {  in IsReduceSupportFracNZ()
     107  auto reduce_last_axis = std::any_of(axis_.begin(), axis_.end(), [this](const size_t &elem) {  in IsReduceSupportFracNZ()
     123  if (!keep_dims_ || axis_.empty()) {  in IsFracZAndC1HWNCoC0Common()
     126  auto reduce_n_c_axis = std::any_of(axis_.begin(), axis_.end(),  in IsFracZAndC1HWNCoC0Common()
|
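Each format check above asks the same question with std::any_of: does the reduction touch an axis that the candidate layout splits (C for the 5HD-style layouts, N and C for FracZ/C1HWNCoC0, the trailing axes for FracNZ)? A sketch of the 5HD-style check, assuming NCHW where C is axis 1:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // True if the reduction touches the channel axis; layouts that split C
    // into C1/C0 blocks cannot be selected in that case.
    bool ReducesChannelAxis(const std::vector<size_t> &axes) {
      constexpr size_t kChannelAxis = 1;  // assumption: NCHW layout
      return std::any_of(axes.begin(), axes.end(),
                         [](size_t a) { return a == kChannelAxis; });
    }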
/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/kernel/ |
D | softmax.cc |
      47  axis_ = parameter->axis_;  in CheckSpecs()
      53  if (axis_ < 0) {  in CheckSpecs()
      54  axis_ = in_shape.size() + axis_;  in CheckSpecs()
      56  axis_ += DIMENSION_4D - in_shape.size();  in CheckSpecs()
      57  if (axis_ != 1 && axis_ != 2 && axis_ != 3) {  in CheckSpecs()
      69  if (out_shape_.H == 1 && out_shape_.W == 1 && axis_ == 3) {  in Prepare()
      75  kernel_name += "Axis" + std::to_string(axis_);  in Prepare()
     111  if (axis_ == 1) {  in SetGlobalLocal()
     114  } else if (axis_ == 2) {  in SetGlobalLocal()
     117  } else if (axis_ == 3) {  in SetGlobalLocal()
|
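CheckSpecs() wraps a negative axis (lines 53-54), then shifts it into padded-to-4D coordinates (line 56) so that only axes 1-3 remain legal. The arithmetic in isolation:

    #include <cstddef>

    // Wrap a negative axis, then re-express it as if the shape were padded
    // to 4D: e.g. axis 1 of a rank-2 (N, C) tensor becomes axis 3 of
    // (N, 1, 1, C). For rank-4 input the shift is zero.
    int AxisIn4D(int axis, size_t rank) {
      if (axis < 0) axis += static_cast<int>(rank);
      return axis + (4 - static_cast<int>(rank));
    }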
D | concat.cc |
      86  MS_LOG(DEBUG) << " concat at axis=: " << param->axis_;  in CheckSpecs()
      98  axis_ = param->axis_;  in CheckSpecs()
      99  if (axis_ < 0) {  in CheckSpecs()
     100  axis_ += in_tensors_.front()->shape().size();  in CheckSpecs()
     102  if (axis_ < 0 || axis_ > 3) {  in CheckSpecs()
     106  if (out_tensors_shape_size < 4 && type() == PrimitiveType_Concat && axis_ != 0) {  in CheckSpecs()
     108  axis_ = axis_ + 2;  in CheckSpecs()
     110  axis_ = axis_ + 1;  in CheckSpecs()
     112  …MS_LOG(WARNING) << " Unsupported axis =: " << axis_ << " shape().size()=: " << out_tensors_shap…  in CheckSpecs()
     133  if (axis_ == 3 && !Align_) {  in SetConstArgs()
     [all …]
|
D | stack.cc |
      73  axis_ = param->axis_;  in CheckSpecs()
      94  axis_ = axis_ < 0 ? axis_ + in_tensors_[0]->shape().size() : axis_;  in CheckSpecs()
      95  if (axis_ > 3) {  in CheckSpecs()
      99  if (axis_ > in_tensors_[0]->shape().size()) {  in CheckSpecs()
     144  axis_ == 1) ||  in SetGlobalLocal()
     145  (in_tensors_[0]->shape().size() == DIMENSION_3D && axis_ == 2)) {  in SetGlobalLocal()
     165  if (axis_ == 0) {  in Prepare()
     168  if (in_tensors_[0]->shape().size() == DIMENSION_1D && axis_ == 1) {  in Prepare()
     169  axis_ += 2;  in Prepare()
     170  } else if (in_tensors_[0]->shape().size() == axis_) {  in Prepare()
     [all …]
|
D | argminmax.cc |
      56  auto axis = (param->axis_ + dims_size) % dims_size;  in CheckSpecs()
      69  cl_int4 flags = {param->out_value_, param->get_max_, param->axis_, param->topk_};  in SetConstArgs()
     113  int reduce_len = GetUpPow2(in_shape.at(param->axis_));  in SetGlobalLocalPre()
     120  …src_size_ = {std::accumulate(in_shape.begin() + param->axis_ + 1, in_shape.end(), 1, std::multipli…  in SetGlobalLocalPre()
     121  … std::accumulate(in_shape.begin(), in_shape.begin() + param->axis_, 1, std::multiplies<int>()),  in SetGlobalLocalPre()
     122  … std::accumulate(in_shape.begin() + param->axis_, in_shape.end(), 1, std::multiplies<int>()),  in SetGlobalLocalPre()
     123  static_cast<int>(in_shape.at(param->axis_))};  in SetGlobalLocalPre()
     124  int out_axis = (param->axis_ == 3 && param->topk_ == 1 && !param->keep_dims_) ? 4 : param->axis_;  in SetGlobalLocalPre()
     126  …std::accumulate(in_shape_align.begin() + param->axis_ + 1, in_shape_align.end(), 1, std::multiplie…  in SetGlobalLocalPre()
     127  …std::accumulate(in_shape_align.begin() + param->axis_, in_shape_align.end(), 1, std::multiplies<in…  in SetGlobalLocalPre()
     [all …]
|
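Lines 120-123 pack four std::accumulate products into src_size_: the stride after the axis, the count before it, the tail from the axis onward, and the axis length itself. Written out as a standalone helper:

    #include <functional>
    #include <numeric>
    #include <vector>

    // The four quantities the argmin/argmax kernel derives from the shape.
    struct ArgReduceSizes {
      int inner;     // product of dims after the axis
      int outer;     // product of dims before the axis
      int tail;      // product of dims from the axis to the end
      int axis_len;  // the dimension being reduced
    };

    ArgReduceSizes ComputeSizes(const std::vector<int> &shape, int axis) {
      auto mul = std::multiplies<int>();
      return {std::accumulate(shape.begin() + axis + 1, shape.end(), 1, mul),
              std::accumulate(shape.begin(), shape.begin() + axis, 1, mul),
              std::accumulate(shape.begin() + axis, shape.end(), 1, mul),
              shape.at(axis)};
    }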
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ |
D | reduce_cpu_kernel.cc |
      44  axis_ = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, AXIS);  in InitKernel()
      46  (void)axis_.emplace_back(AnfAlgo::GetNodeAttr<int64_t>(kernel_node, AXIS));  in InitKernel()
      52  (void)std::transform(axis_.begin(), axis_.end(), axis_.begin(),  in InitKernel()
      54  sort(axis_.begin(), axis_.end());  in InitKernel()
      56  auto last = std::unique(axis_.begin(), axis_.end());  in InitKernel()
      57  axis_.erase(last, axis_.end());  in InitKernel()
      89  …if ((reduce_type_ == kReduceMean || reduce_type_ == kReduceSum) && axis_.size() == 1 && axis_[0] =…  in InitKernel()
     104  if (axis_.empty() || input_shape_.empty() || input_shape_.size() == 1) {  in Launch()
     125  if (j == axis_.size() || i != axis_[j]) {  in Launch()
     133  for (auto &it : axis_) {  in Launch()
|
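InitKernel() canonicalizes the axis attribute before use: wrap negatives (line 52), sort (line 54), and deduplicate (lines 56-57). As one helper:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Canonicalize a reduce-axis list: wrap negatives into [0, rank), sort,
    // and remove duplicates, mirroring the transform/sort/unique hits above.
    void CanonicalizeAxes(std::vector<int64_t> *axes, int64_t rank) {
      std::transform(axes->begin(), axes->end(), axes->begin(),
                     [rank](int64_t a) { return a < 0 ? a + rank : a; });
      std::sort(axes->begin(), axes->end());
      axes->erase(std::unique(axes->begin(), axes->end()), axes->end());
    }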
D | split_cpu_kernel.cc |
      32  axis_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, AXIS);  in InitKernel()
      67  param.split_dim_ = LongToInt(axis_);  in LaunchSplit()
      79  for (size_t i = 0; i < static_cast<size_t>(axis_); ++i) {  in LaunchSplit()
     110  if (axis_ < -dims || axis_ >= dims) {  in CheckParam()
     111  MS_LOG(EXCEPTION) << "Attr axis_ " << axis_ << " must be in " << -dims << "~" << dims;  in CheckParam()
     113  if (axis_ < 0) {  in CheckParam()
     114  axis_ += SizeToLong(input_shape_.size());  in CheckParam()
     116  if (output_num_ > IntToSize(input_shape_[LongToUlong(axis_)])) {  in CheckParam()
     117  …S_LOG(EXCEPTION) << "Attr output_num " << output_num_ << " must be less than " << input_shape_[axis_];  in CheckParam()
|
D | gather_cpu_kernel.cc |
      43  axis_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, AXIS);  in InitKernel()
      57  axis_ = reinterpret_cast<int64_t *>(inputs[2]->addr)[0];  in Launch()
      61  if (axis_ < -dims || axis_ >= dims) {  in Launch()
      64  } else if (axis_ < 0) {  in Launch()
      65  axis_ = axis_ + dims;  in Launch()
      77  auto axis = static_cast<size_t>(axis_);  in ParallelRun()
|
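This kernel takes both paths: the axis as a node attribute (line 43) and, when present, as a runtime input read from the third tensor on every Launch() (line 57), which is why the range check is repeated there. A sketch of the runtime path (the buffer layout is assumed from line 57):

    #include <cstdint>

    // Runtime variant: the axis arrives as the first element of a raw input
    // buffer rather than as a node attribute, so it is re-validated per launch.
    int64_t AxisFromInputBuffer(const void *addr, int64_t rank) {
      int64_t axis = static_cast<const int64_t *>(addr)[0];
      if (axis < -rank || axis >= rank) return -1;  // caller reports the error
      return axis < 0 ? axis + rank : axis;
    }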
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/ |
D | softmax_grad_fp32_tests.cc |
      36  softmax_param->axis_ = axis;  in InitSoftMaxParam()
      46  softmax_param->axis_ = axis;  in InitSoftMaxParam()
      62  if (softmax_param->axis_ == -1) softmax_param->axis_ = softmax_param->n_dim_ - 1;  in TEST_F()
      63  for (int i = softmax_param->axis_ + 1; i < softmax_param->n_dim_; i++) {  in TEST_F()
      68  …sum_mul = new (std::nothrow) float[inner_size * softmax_param->input_shape_[softmax_param->axis_]];  in TEST_F()
     122  if (softmax_param->axis_ == -1) softmax_param->axis_ = softmax_param->n_dim_ - 1;  in TEST_F()
     123  for (int i = softmax_param->axis_ + 1; i < softmax_param->n_dim_; i++) {  in TEST_F()
     128  …sum_mul = new (std::nothrow) float[inner_size * softmax_param->input_shape_[softmax_param->axis_]];  in TEST_F()
     187  if (softmax_param->axis_ == -1) softmax_param->axis_ = softmax_param->n_dim_ - 1;  in TEST_F()
     188  for (int i = softmax_param->axis_ + 1; i < softmax_param->n_dim_; i++) {  in TEST_F()
     [all …]
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/math/ |
D | cumprod_gpu_kernel.h |
      35  axis_(0),  in CumProdGpuKernel()
      70  axis_ = static_cast<int>(GetAttr<int64_t>(kernel_node, "axis"));  in Init()
      74  if (axis_ >= input_dim_length) {  in Init()
      75  MS_LOG(EXCEPTION) << "Axis is: " << axis_ << " out of bounds.";  in Init()
      77  while (axis_ < 0) {  in Init()
      78  axis_ += input_dim_length;  in Init()
      98  dims_[1] = shape_[IntToSize(axis_)];  in Reshape()
     100  for (size_t i = 0; i < IntToSize(axis_); i++) {  in Reshape()
     103  for (size_t i = IntToSize(axis_) + 1; i < shape_.size(); i++) {  in Reshape()
     113  int axis_;  variable
|
D | cumsum_gpu_kernel.h |
      35  axis_(0),  in CumSumGpuKernel()
      70  axis_ = static_cast<int>(GetAttr<int64_t>(kernel_node, "axis"));  in Init()
      74  if (axis_ >= input_dim_length) {  in Init()
      75  MS_LOG(EXCEPTION) << "Axis is: " << axis_ << " out of bounds.";  in Init()
      77  while (axis_ < 0) {  in Init()
      78  axis_ += input_dim_length;  in Init()
      98  dims_[1] = shape_[IntToSize(axis_)];  in Reshape()
     100  for (size_t i = 0; i < IntToSize(axis_); i++) {  in Reshape()
     103  for (size_t i = IntToSize(axis_) + 1; i < shape_.size(); i++) {  in Reshape()
     113  int axis_;  variable
|
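The Reshape() hits of both CumProd and CumSum collapse the shape around the axis into an (outer, axis length, inner) triple, which is all a cumulative scan needs. A reference host-side scan over that decomposition (not the GPU implementation, just the semantics it computes):

    #include <cstddef>

    // Reference cumulative sum along one axis of a dense row-major tensor,
    // using the same (outer, axis_len, inner) decomposition as dims_[0..2].
    void CumSumAxis(const float *in, float *out, size_t outer, size_t axis_len,
                    size_t inner) {
      for (size_t o = 0; o < outer; ++o) {
        for (size_t i = 0; i < inner; ++i) {
          const size_t base = o * axis_len * inner + i;  // element (o, 0, i)
          out[base] = in[base];
          for (size_t a = 1; a < axis_len; ++a) {
            const size_t idx = base + a * inner;  // element (o, a, i)
            out[idx] = out[idx - inner] + in[idx];
          }
        }
      }
    }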
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/ops_info/ |
D | l2_normalize_info.cc |
      36  int64_t axis_index = axis_;  in CheckStrategy()
      37  if (axis_ < 0) {  in CheckStrategy()
      39  axis_index = static_cast<int64_t>(input_dim) + axis_;  in CheckStrategy()
      55  axis_ = GetValue<std::vector<int64_t>>(iter->second)[0];  in GetAttrs()
      67  int64_t axis_index = axis_;  in GenerateOpStrategies()
      68  if (axis_ < 0) {  in GenerateOpStrategies()
      70  axis_index = static_cast<int64_t>(input_dim) + axis_;  in GenerateOpStrategies()
|
D | gather_v2_info.cc |
      66  axis_ = axis;  in GetAttrs()
      89  axis_strategy_ = strategy->GetInputDim().at(0).at(LongToSize(axis_));  in CheckStrategy()
     138  (void)tensor_map_out.erase(tensor_map_out.begin() + axis_);  in InferTensorMap()
     140  (void)tensor_map_out.insert(tensor_map_out.begin() + axis_, index_size_ - 1, -1);  in InferTensorMap()
     150  tensor_map_in_index.push_back(SizeToLong(size) - axis_ - 1);  in InferTensorMap()
     228  for (size_t i = LongToSize(axis_) + 1; i < dev_matrix_shape_.size(); i++) {  in InferTensorSubOps()
     231  if ((axis_ >= SizeToLong(dev_matrix_shape_.size())) || axis_ < 0) {  in InferTensorSubOps()
     232  MS_LOG(ERROR) << "Axis is " << axis_ << ", not in [0, " << dev_matrix_shape_.size() << ").";  in InferTensorSubOps()
     234  int64_t mod_p = mod_n * dev_matrix_shape_.at(LongToSize(axis_));  in InferTensorSubOps()
     243  if ((axis_ >= SizeToLong(inputs_shape_.at(0).size())) || axis_ < 0) {  in InferTensorSubOps()
     [all …]
|
D | gather_v2_p_info.cc |
     138  axis_ = axis;  in GetAttrs()
     155  if (manual_split_ && (axis_ != 0)) {  in GetAttrs()
     156  MS_LOG(ERROR) << name_ << ": The axis or offset must be 0 if manual split, but got " << axis_;  in GetAttrs()
     234  if ((param_strategy.at(LongToSize(axis_)) != 1) && (product_i != 1)) {  in CheckSplitAxisStrategy()
     235  … MS_LOG(DEBUG) << name_ << ": param is split at dim (axis)" << axis_ << " ,index can't be split.";  in CheckSplitAxisStrategy()
     241  …if ((product_p != stage_device_size_) && (param_strategy.at(LongToSize(axis_)) != 1) && (axis_ != …  in CheckSplitAxisStrategy()
     246  …if ((product_p != stage_device_size_) && (param_strategy.at(LongToSize(axis_)) != 1) && (axis_ == …  in CheckSplitAxisStrategy()
     259  if (axis_ != 0) {  in ShardBatchAndAxis()
     292  if ((axis_ == 0) && (index_shape.at(0) % param_strategy.at(0) != 0) && !dynamic_shape_indices_) {  in SetAttribute()
     361  …if (axis_ != 0 && param_shape.at(0) % (param_strategy.at(0) * param_strategy.at(LongToSize(axis_))…  in CheckStrategy()
     [all …]
|
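Line 234 encodes the core constraint of the sharded gather: if the parameter is split along the gather axis, the index tensor must not be split, since each index shard would otherwise need parameter rows owned by other devices. A condensed sketch of that check (strategy vectors hold the split factor per dimension; names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Accept the strategy only if the parameter is unsplit along the gather
    // axis, or the index tensor is fully replicated (split product == 1).
    bool SplitAxisStrategyOk(const std::vector<int64_t> &param_strategy,
                             const std::vector<int64_t> &index_strategy,
                             size_t axis) {
      int64_t index_product = 1;
      for (int64_t s : index_strategy) index_product *= s;
      return param_strategy.at(axis) == 1 || index_product == 1;
    }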
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/ |
D | cumsum_fp32.cc |
      68  param_->axis_ = *axis_data;  in ReSize()
      69  if (param_->axis_ < 0) {  in ReSize()
      70  param_->axis_ += input_tensor->shape().size();  in ReSize()
      72  if (param_->axis_ < 0 || param_->axis_ >= static_cast<int>(input_tensor->shape().size())) {  in ReSize()
      73  MS_LOG(ERROR) << "axis " << param_->axis_ << " error.";  in ReSize()
      77  for (int i = 0; i < param_->axis_; ++i) {  in ReSize()
      80  axis_dim_ = input_tensor->shape().at(param_->axis_);  in ReSize()
      82  for (int i = param_->axis_ + 1; i < static_cast<int>(input_tensor->shape().size()); ++i) {  in ReSize()
|
D | concat_fp32.cc |
      40  concat_param_->axis_ =  in ReSize()
      41  …concat_param_->axis_ >= 0 ? concat_param_->axis_ : in_tensors_.front()->shape().size() + concat_pa…  in ReSize()
      55  MS_CHECK_LT(concat_param_->axis_, static_cast<int>(in_tensors_[i]->shape().size()), RET_ERROR);  in DoConcat()
      59  MS_CHECK_LT(concat_param_->axis_, static_cast<int>(output_shape.size()), RET_ERROR);  in DoConcat()
      65  …Concat(inputs_addr.data(), input_num, concat_param_->axis_, inputs_output_shape.data(), output_sha…  in DoConcat()
|
/third_party/boost/boost/histogram/axis/ |
D | interval_view.hpp |
      24  interval_view(const Axis& axis, index_type idx) : axis_(axis), idx_(idx) {}  in interval_view()
      29  decltype(auto) lower() const noexcept { return axis_.value(idx_); }  in lower()
      31  decltype(auto) upper() const noexcept { return axis_.value(idx_ + 1); }  in upper()
      33  decltype(auto) center() const noexcept { return axis_.value(idx_ + 0.5); }  in center()
      48  const Axis& axis_;  member in boost::histogram::axis::interval_view
|
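Unlike the mutable integer axis_ members above, Boost.Histogram's interval_view stores a reference to an axis plus a bin index and derives its bounds lazily from axis_.value() (lines 29-33; note that center() passes idx_ + 0.5). Typical use through a regular axis, whose bin() returns an interval_view:

    #include <boost/histogram/axis/regular.hpp>
    #include <iostream>

    int main() {
      boost::histogram::axis::regular<> ax(4, 0.0, 2.0);  // 4 bins over [0, 2)
      auto bin = ax.bin(1);  // interval_view over the second bin
      // lower/upper/center map to axis_.value(idx_), value(idx_ + 1),
      // and value(idx_ + 0.5), per the header above.
      std::cout << bin.lower() << ", " << bin.upper() << ", " << bin.center() << "\n";
      return 0;  // prints: 0.5, 1, 0.75
    }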