/third_party/mindspore/mindspore/numpy/

D | array_ops.py
     73  ndim = a.ndim + len(axis)
     74  axis = _canonicalize_axis(axis, ndim)
    199  ndim = F.tuple_len(shape)
    201  axis = _check_axes_range(axis, ndim)
    202  start = _check_start_normalize(start, ndim)
    205  perm = F.make_range(0, ndim)
    208  if axis + 1 < ndim:
    214  if start < ndim:
    472  _check_axis_in_range(axis, arr.ndim)
    514  if tensor.ndim < 1:
    [all …]
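Note: the matches at lines 205-214 belong to the permutation-building step of a rollaxis/moveaxis-style op. A plain-Python sketch of that logic (an illustration with hypothetical names, not MindSpore's actual implementation):

    # Build the transpose permutation that rolls `axis` to position `start`,
    # assuming both have already been canonicalized into [0, ndim).
    def rollaxis_perm(ndim, axis, start):
        perm = list(range(ndim))      # identity permutation, cf. F.make_range(0, ndim)
        perm.pop(axis)                # take the rolled axis out ...
        if start > axis:
            start -= 1                # account for the removed slot
        perm.insert(start, axis)      # ... and re-insert it at `start`
        return tuple(perm)

    assert rollaxis_perm(4, axis=2, start=0) == (2, 0, 1, 3)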
D | utils_const.py
     81  def _check_start_normalize(start, ndim):  argument
     83  if start < -ndim or start > ndim:
     86  start = start + ndim
     91  def _check_axes_range(axes, ndim):  argument
    109  axes = _canonicalize_axis(axes, ndim)
    149  def _check_axis_in_range(axis, ndim):  argument
    153  if not -ndim <= axis < ndim:
    155  return axis % ndim
    159  def _check_axis_valid(axes, ndim):  argument
    165  axes = F.make_range(ndim)
    [all …]
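Note: lines 149-155 are the standard NumPy-style axis check: an axis is valid in [-ndim, ndim), and the modulo maps negatives into [0, ndim). A standalone sketch of exactly that check:

    def check_axis_in_range(axis, ndim):
        # Reject out-of-range axes, then normalize negatives (e.g. -1 -> ndim - 1).
        if not -ndim <= axis < ndim:
            raise ValueError(f"axis {axis} is out of bounds for an array of dimension {ndim}")
        return axis % ndim

    assert check_axis_in_range(-1, 4) == 3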
D | array_creations.py
     90  if ndmin > res.ndim:
    470  iota_shape = _list_comprehensions(start.ndim+1, 1, True)
    519  axis = _canonicalize_axis(axis, start.ndim+1)
    592  axis = _canonicalize_axis(axis, start.ndim+1)
    641  axis = _canonicalize_axis(axis, start.ndim+1)
   1289  ndim = len(grids)
   1294  grid_index = _index(i, ndim, cartesian=cartesian)
   1299  grid_index = _index(i, ndim, cartesian=cartesian)
   1300  shape_expanded = _expanded_shape(ndim, shape_out[grid_index], grid_index)
   1303  x = F.tile(x, _tile_size(shape_expanded, shape_out, ndim))
    [all …]

D | math_ops.py
    572  if x1.ndim == 0 and x2.ndim == 0:
    575  if x1.ndim == 0:
    577  elif x2.ndim == 0:
   1029  axis = _canonicalize_axis(axis, x.ndim)
   1044  perm = _expanded_shape(x.ndim, weights.shape[0], axis)
   1052  … sum_of_weights = _broadcast_to(sum_of_weights, sum_of_weights.shape, x_avg.shape, x_avg.ndim)
   1316  if x1.ndim == 0 and x2.ndim == 0:
   1319  if x1.ndim == 0:
   1321  elif x2.ndim == 0:
   1934  axis = _canonicalize_axis(axis, a.ndim)
    [all …]
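Note: lines 572-577 and 1316-1321 special-case 0-d (scalar) operands before a binary op. One plausible reading of the pattern, sketched with NumPy (the branch bodies here are assumptions, not the MindSpore code):

    import numpy as np

    def binary_op(x1, x2, op):
        if x1.ndim == 0 and x2.ndim == 0:
            return op(x1, x2)                    # both scalar: no broadcasting needed
        if x1.ndim == 0:
            x1 = np.broadcast_to(x1, x2.shape)   # promote the scalar to the other shape
        elif x2.ndim == 0:
            x2 = np.broadcast_to(x2, x1.shape)
        return op(x1, x2)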
D | utils.py
     78  def _expand(x, ndim, axis=0):  argument
     80  shape = _add_unit_axes(F.shape(x), ndim, axis == -1)
    100  axis = F.make_range(x.ndim)
    177  index_start = (0,) * f.ndim
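Note: _expand at lines 78-80 grows a tensor to a target rank by padding its shape with unit axes, apparently appending them when axis == -1 and prepending otherwise. A sketch of the shape arithmetic (semantics inferred from the call site, so treat it as an assumption):

    def add_unit_axes(shape, ndim, append=False):
        # Pad `shape` with 1s until it has `ndim` entries.
        pad = (1,) * (ndim - len(shape))
        return shape + pad if append else pad + shape

    assert add_unit_axes((3, 4), 4) == (1, 1, 3, 4)
    assert add_unit_axes((3, 4), 4, append=True) == (3, 4, 1, 1)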
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/

D | arithmetic_infer.c
     39  … const int *input_shape1, int *ndim, int *in_shape0, int *in_shape1, int *out_shape,  in BroadCastInferShape() argument
     44  …MakeUpInputShapes(input_shape0_size, input_shape1_size, input_shape0, input_shape1, ndim, in_shape…  in BroadCastInferShape()
     45  if (*ndim >= MAX_SHAPE_SIZE) {  in BroadCastInferShape()
     49  return BroadCastOutputShape(in_shape0, in_shape1, *ndim, out_shape, has_broad_cast);  in BroadCastInferShape()
     80  int ndim = input_shape0_size;  in ArithmeticInferShape() local
     82  …if (BroadCastInferShape(input_shape0_size, input_shape1_size, input_shape0, input_shape1, &ndim, i…  in ArithmeticInferShape()
     87  SetShapeArray(output, output_shape, ndim);  in ArithmeticInferShape()
     90  param->ndim_ = ndim;  in ArithmeticInferShape()
     91  if (ndim > MAX_SHAPE_SIZE) {  in ArithmeticInferShape()
     94  memcpy(param->in_shape0_, in_shape0, ndim * sizeof(int));  in ArithmeticInferShape()
    [all …]
D | broadcast_to_infer.c
     67  const int *input_shape1, int *ndim, int *in_shape0, int *in_shape1) {  in MakeUpInputShapes() argument
     69  *ndim = input_shape1_size;  in MakeUpInputShapes()
     81  *ndim = input_shape0_size;  in MakeUpInputShapes()
    100  int BroadCastOutputShape(const int *in_shape0, const int *in_shape1, const int ndim, int *out_shape,  in BroadCastOutputShape() argument
    102  for (int i = 0; i < ndim; i++) {  in BroadCastOutputShape()
    120  const int *input_shape1, int *ndim, int *out_shape, bool *has_broad_cast) {  in BroadCastToShape() argument
    128  …MakeUpInputShapes(input_shape0_size, input_shape1_size, input_shape0, input_shape1, ndim, in_shape…  in BroadCastToShape()
    129  if (*ndim >= MAX_SHAPE_SIZE) {  in BroadCastToShape()
    133  return BroadCastOutputShape(in_shape0, in_shape1, *ndim, out_shape, has_broad_cast);  in BroadCastToShape()
    159  int ndim = input_shape_size;  in BroadcastToInferShape() local
    [all …]
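Note: broadcasting inference is factored into two helpers here: MakeUpInputShapes pads the lower-rank shape with leading 1s so both inputs agree on ndim, and BroadCastOutputShape merges the padded shapes pairwise. The same scheme in Python (a simplified sketch, not the nnacl code):

    def make_up_input_shapes(s0, s1):
        # Left-pad the shorter shape with 1s to a common rank.
        n = max(len(s0), len(s1))
        return (1,) * (n - len(s0)) + tuple(s0), (1,) * (n - len(s1)) + tuple(s1)

    def broadcast_output_shape(s0, s1):
        # Merge dimension pairs; unequal sizes broadcast only against 1.
        out, has_broadcast = [], False
        for a, b in zip(s0, s1):
            if a != b:
                if a != 1 and b != 1:
                    raise ValueError(f"cannot broadcast {a} with {b}")
                has_broadcast = True
            out.append(max(a, b))
        return tuple(out), has_broadcast

    assert broadcast_output_shape(*make_up_input_shapes((8, 1, 6), (7, 1))) == ((8, 7, 6), True)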
D | strided_slice_infer.c
     81  int GenerateAxes(const TensorC *axes_tensor, int *axes, int num, int ndim) {  in GenerateAxes() argument
    102  axes[i] += ndim;  in GenerateAxes()
    109  int HandleAxesInputExist(const TensorC *const *inputs, int *ndim, int *in_shape, int *begins, int *…  in HandleAxesInputExist() argument
    122  *ndim = (int)(input_tensor->shape_size_);  in HandleAxesInputExist()
    136  int ret = GenerateAxes(axes_tensor, axes, begin_ndim, *ndim);  in HandleAxesInputExist()
    141  if (*ndim > MAX_SHAPE_SIZE || *ndim < 0) {  in HandleAxesInputExist()
    144  for (int i = 0; i < *ndim; i++) {  in HandleAxesInputExist()
    149  for (int i = 0; i < *ndim; ++i) {  in HandleAxesInputExist()
    152  for (int i = 0; i < *ndim; ++i) {  in HandleAxesInputExist()
D | broadcast_to_infer.h
     29  const int *input_shape1, int *ndim, int *in_shape0, int *in_shape1);
     30  int BroadCastOutputShape(const int *in_shape0, const int *in_shape1, const int ndim, int *out_shape,
/third_party/python/Objects/

D | memoryobject.c
    256  (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
    261  assert(dest->ndim > 0 && src->ndim > 0);  in last_dim_is_contiguous()
    264  dest->strides[dest->ndim-1] == dest->itemsize &&  in last_dim_is_contiguous()
    265  src->strides[src->ndim-1] == src->itemsize);  in last_dim_is_contiguous()
    299  if (dest->ndim != src->ndim)  in equiv_shape()
    302  for (i = 0; i < dest->ndim; i++) {  in equiv_shape()
    362  copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,  in copy_rec() argument
    369  assert(ndim >= 1);  in copy_rec()
    371  if (ndim == 1) {  in copy_rec()
    383  copy_rec(shape+1, ndim-1, itemsize,  in copy_rec()
    [all …]
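Note: copy_rec (lines 362-383) peels one dimension off per recursive call and, once ndim == 1, copies items one by one through the last dimension's stride. A Python rendering of that recursion, with bytearrays and byte offsets standing in for C pointers (suboffsets omitted):

    def copy_strided(shape, itemsize, dest, dstrides, doff, src, sstrides, soff):
        if len(shape) == 1:                       # base case, cf. `if (ndim == 1)`
            for i in range(shape[0]):
                d = doff + i * dstrides[0]
                s = soff + i * sstrides[0]
                dest[d:d + itemsize] = src[s:s + itemsize]
            return
        for i in range(shape[0]):                 # recurse with shape+1, ndim-1
            copy_strided(shape[1:], itemsize,
                         dest, dstrides[1:], doff + i * dstrides[0],
                         src, sstrides[1:], soff + i * sstrides[0])

    src = bytes(range(6))                         # a 2x3 C-contiguous buffer, itemsize 1
    dst = bytearray(6)
    copy_strided((3, 2), 1, dst, (2, 1), 0, src, (1, 3), 0)   # copy its transpose
    assert bytes(dst) == bytes([0, 3, 1, 4, 2, 5])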
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/opencl/

D | fullconnection_tests.cc
     26  … std::vector<int> *bias_shape, std::vector<int> *output_shape, int ndim, int ci, int co,  in CreateParameter() argument
     34  if (ndim == 2) {  in CreateParameter()
     39  } else if (ndim == 4) {  in CreateParameter()
     44  } else if (ndim == 3) {  in CreateParameter()
     55  int ndim = 2;  variable
     65  …auto *param = CreateParameter(&input_shape, &weight_shape, &bias_shape, &output_shape, ndim, ci, c…
     74  int ndim = 4;  variable
     87  …auto *param = CreateParameter(&input_shape, &weight_shape, &bias_shape, &output_shape, ndim, ci, c…
     96  int ndim = 3;  variable
    110  …auto *param = CreateParameter(&input_shape, &weight_shape, &bias_shape, &output_shape, ndim, ci, c…
    [all …]
/third_party/mindspore/mindspore/nn/metrics/

D | metric.py
    105  if data.ndim > 1 and np.equal(data ** 2, data).all():
    116  if preds.ndim > target.ndim:
    257  if y_pred.ndim != y.ndim + 1:
    259  … 'but got y_pred: {} dims and y: {} dims'.format(y_pred.ndim, y.ndim))
    264  if y_pred.ndim != y.ndim:
    266  'dims and y: {} dims.'.format(self._type, y_pred.ndim, y.ndim))
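Note: the check at line 105 exploits the fact that x**2 == x holds elementwise exactly when every entry is 0 or 1, so a rank > 1 array passing it is treated as candidate one-hot data. A minimal sketch (the real metric may apply further checks, e.g. on row sums):

    import numpy as np

    def looks_onehot(data):
        return data.ndim > 1 and np.equal(data ** 2, data).all()

    assert looks_onehot(np.eye(3))
    assert not looks_onehot(np.array([[0.2, 0.8], [0.9, 0.1]]))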
D | loss.py
     64  if loss.ndim == 0:
     67  if loss.ndim != 1:
     68  raise ValueError("The dimension of loss must be 1, but got {}".format(loss.ndim))

D | accuracy.py
     83  if self._type == 'classification' and y_pred.ndim == y.ndim and self._check_onehot_data(y):
     99  dimension_index = y_pred.ndim - 1

D | confusion_matrix.py
    103  if not (y_pred.ndim == y.ndim or y_pred.ndim == y.ndim + 1):
    107  if y_pred.ndim == y.ndim + 1:
    110  if y_pred.ndim == y.ndim and y_pred.dtype in (np.float16, np.float32, np.float64):
    316  dims = y_pred.ndim
    591  input_dim = confusion_matrix.ndim
/third_party/python/Modules/

D | _testbuffer.c
    154  base->ndim = 1;  in ndbuf_new()
    267  if (ndbuf->base.ndim == 0)  in init_flags()
    473  copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,  in copy_rec() argument
    480  assert(ndim >= 1);  in copy_rec()
    482  if (ndim == 1) {  in copy_rec()
    506  copy_rec(shape+1, ndim-1, itemsize,  in copy_rec()
    520  dest->ndim != src->ndim)  in cmp_structure()
    523  for (i = 0; i < dest->ndim; i++) {  in cmp_structure()
    541  assert(dest->ndim > 0);  in copy_buffer()
    549  if ((dest->suboffsets && dest->suboffsets[dest->ndim-1] >= 0) ||  in copy_buffer()
    [all …]
/third_party/mindspore/mindspore/

D | _checkparam.py
    651  def check_transpose_axis(axes, ndim):  argument
    654  return tuple(range(ndim-1, -1, -1))
    667  if len(axes) != ndim:
    700  def check_swapaxes_axis(axes, ndim):  argument
    703  Validator.check_axis_in_range(axes, ndim)
    704  return axes % ndim
    709  Validator.check_axis_in_range(axis, ndim)
    710  axes = tuple(map(lambda x: x % ndim, axes))
    728  ndim = len(shape)
    732  if axes >= ndim or axes < -ndim:
    [all …]
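Note: lines 651-710 validate transpose/swapaxes arguments: transpose with no axes defaults to reversing all dimensions, and swapaxes normalizes possibly-negative axes via modulo. A simplified sketch of both validators (the real ones handle more input forms):

    def check_transpose_axis(axes, ndim):
        if not axes:                                # default: reverse all axes, cf. line 654
            return tuple(range(ndim - 1, -1, -1))
        if len(axes) != ndim:
            raise ValueError(f"expected {ndim} axes, got {len(axes)}")
        return tuple(ax % ndim for ax in axes)

    def check_swapaxes_axis(axis, ndim):
        if not -ndim <= axis < ndim:
            raise ValueError(f"axis {axis} out of range for ndim {ndim}")
        return axis % ndim                          # cf. line 704

    assert check_transpose_axis(None, 3) == (2, 1, 0)
    assert check_swapaxes_axis(-1, 4) == 3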
/third_party/mindspore/mindspore/_extends/parse/

D | standard_method.py
    176  ndim = P.Rank()(x)
    178  for i in F.make_range(0, ndim):
    180  for j in F.make_range(i + 1, ndim):
    256  ndim = F.rank(x)
    257  perm = check_transpose_axis_const(axis, ndim)
    382  axis1, axis2 = check_swapaxes_axis_const((axis1, axis2), x.ndim)
    389  perm = F.make_range(0, x.ndim)
    391  if axis2 + 1 < x.ndim:
    546  check_axis_in_range_const(axis, x.ndim)
    756  ndim = x.ndim
    [all …]
/third_party/mindspore/mindspore/common/

D | tensor.py
    291  def ndim(self):  member in Tensor
    605  perm = validator.check_transpose_axis(axes, self.ndim)
    699  perm = tuple(range(self.ndim-1, -1, -1))
    729  axis1, axis2 = validator.check_swapaxes_axis((axis1, axis2), self.ndim)
    736  perm = tuple(range(0, self.ndim))
    737  if axis2 + 1 < self.ndim:
    845  axis = validator.check_axis_in_range(axis, a.ndim)
    879  axis = validator.check_axis_in_range(axis, a.ndim)
    925  validator.check_axis_in_range(axis, x.ndim)
   1131  axis = validator.check_axis_valid(axis, self.ndim)
    [all …]
/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/

D | utils.cc
    187  int GetBroadcastGpuAxis(int ndim, int ori_axis) {  in GetBroadcastGpuAxis() argument
    188  if (ori_axis >= ndim) {  in GetBroadcastGpuAxis()
    189  return ndim - 1;  in GetBroadcastGpuAxis()
    192  if (ndim == DIMENSION_1D) {  in GetBroadcastGpuAxis()
    194  } else if (ndim == DIMENSION_2D) {  in GetBroadcastGpuAxis()
    196  } else if (ndim == DIMENSION_3D) {  in GetBroadcastGpuAxis()
    198  } else if (ndim == DIMENSION_4D) {  in GetBroadcastGpuAxis()
    200  } else if (ndim > DIMENSION_4D) {  in GetBroadcastGpuAxis()
    201  MS_LOG(ERROR) << "GPU doesn't support ndim>=" << ndim;  in GetBroadcastGpuAxis()
/third_party/python/Lib/test/

D | test_buffer.py
    264  def strides_from_shape(ndim, shape, itemsize, layout):  argument
    267  if ndim == 0:
    271  for i in range(ndim-2, -1, -1):
    275  for i in range(1, ndim):
    321  def getindex(ndim, ind, strides):  argument
    324  for i in range(ndim):
    333  ndim = len(shape)
    334  sstrides = strides_from_shape(ndim, shape, 1, 'C')
    335  dstrides = strides_from_shape(ndim, shape[::-1], 1, 'C')
    338  fr = getindex(ndim, ind, sstrides)
    [all …]
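Note: strides_from_shape computes the byte strides of a contiguous array for C (row-major) and Fortran (column-major) layouts. A sketch reconstructed from the loop bounds visible above (the stdlib test helper may differ in detail):

    def strides_from_shape(ndim, shape, itemsize, layout):
        if ndim == 0:
            return ()
        strides = [itemsize] * ndim
        if layout == 'C':                   # row-major: last axis is contiguous
            for i in range(ndim - 2, -1, -1):
                strides[i] = strides[i + 1] * shape[i + 1]
        else:                               # 'F': column-major, first axis is contiguous
            for i in range(1, ndim):
                strides[i] = strides[i - 1] * shape[i - 1]
        return tuple(strides)

    assert strides_from_shape(3, (2, 3, 4), 8, 'C') == (96, 32, 8)
    assert strides_from_shape(3, (2, 3, 4), 8, 'F') == (8, 16, 48)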
/third_party/mindspore/mindspore/_extends/graph_kernel/expanders/

D | squeeze.py
     37  ndim = len(shape)
     38  out_shape = [shape[i] for i in range(ndim) if not (i in axis or (i - ndim) in axis)]
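Note: the comprehension at line 38 keeps dimension i unless i is named in `axis` either as a non-negative index or as its negative alias i - ndim. Standalone (validation that squeezed dims have size 1 is omitted, as in the matched lines):

    def squeeze_out_shape(shape, axis):
        # Drop the dimensions listed in `axis`, accepting negative indices.
        ndim = len(shape)
        return [shape[i] for i in range(ndim)
                if not (i in axis or (i - ndim) in axis)]

    assert squeeze_out_shape([1, 3, 1, 5], axis=(0, -2)) == [3, 5]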
/third_party/mindspore/tests/st/ops/cpu/

D | test_batchdot_op.py
     42  if y.ndim == 2:
     43  axes = [x.ndim - 1, y.ndim - 1]
     45  axes = [x.ndim - 1, y.ndim - 2]
     47  axes[0] += x.ndim
     49  axes[1] += y.ndim
     55  if result.ndim == 1:
/third_party/mindspore/mindspore/dataset/utils/

D | browse_dataset.py
     75  …assert isinstance(image, np.ndarray) and image.ndim == 3 and (image.shape[0] == 3 or image.shape[2…
     78  …assert isinstance(bboxes, np.ndarray) and bboxes.ndim == 2 and (bboxes.shape[1] == 4 or bboxes.sha…
     80  assert isinstance(labels, np.ndarray) and labels.ndim == 2 and labels.shape[1] == 1 and \
     83  …assert isinstance(segm, np.ndarray) and segm.ndim == 3, "segm must be a ndarray in (M, H, W) forma…
/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/kernel/

D | pad.cc
    104  int ndim = in_tensors_.front()->shape().size();  in SetConstArgs() local
    106  pad_before_ori.reserve(ndim);  in SetConstArgs()
    108  for (size_t i = 0; i < ndim; i++) {  in SetConstArgs()
    112  Broadcast2GpuShape(pad_before.s, pad_before_ori.data(), ndim, 0);  in SetConstArgs()