
Searched full:shape (Results 1 – 25 of 4474) sorted by relevance


/third_party/typescript/tests/baselines/reference/
typeGuardNarrowsIndexedAccessOfKnownProperty.types
30 type Shape = Square | Rectangle | Circle;
31 >Shape : Shape
35 >"0" : { sub: { under: { shape: Shape; };}; }
38 >sub : { under: { shape: Shape;}; }
41 >under : { shape: Shape; }
43 shape: Shape;
44 >shape : Shape
49 function area(s: Shape): number {
50 >area : (s: Shape) => number
51 >s : Shape
[all …]
mappedTypes2.types
85 interface Shape {
127 function f0(s1: Shape, s2: Shape) {
128 >f0 : (s1: Shape, s2: Shape) => void
129 >s1 : Shape
130 >s2 : Shape
135 >s1 : Shape
143 >s2 : Shape
151 function f1(shape: Shape) {
152 >f1 : (shape: Shape) => void
153 >shape : Shape
[all …]
mappedTypes2.js
37 interface Shape {
58 function f0(s1: Shape, s2: Shape) { argument
63 function f1(shape: Shape) { argument
65 var frozen: Readonly<Shape>;
66 var frozen = freeze(shape);
69 function f2(shape: Shape) { argument
71 var partial: Partial<Shape>;
72 var partial: Partial<Shape> = {};
75 function f3(shape: Shape) { argument
76 const x = pick(shape, "name", "location"); // { name: string, location: Point }
[all …]
/third_party/mindspore/mindspore/core/abstract/
prim_arrays.cc
35 return std::make_shared<AbstractTensor>(arg, std::make_shared<Shape>()); in InferImplScalarToArray()
40 // Inputs: a tensor with 0 shape. in InferImplArrayToScalar()
44 auto a_shp = arg->shape(); in InferImplArrayToScalar()
46 if (!a_shp->shape().empty()) { in InferImplArrayToScalar()
47 MS_LOG(EXCEPTION) << "array_to_scalar requires zero size shape."; in InferImplArrayToScalar()
104 auto shape = tensor_base->shape(); in InferImplStack() local
105 MS_EXCEPTION_IF_NULL(shape); in InferImplStack()
106 int64_t rank_base = SizeToLong(shape->shape().size()); in InferImplStack()
126 auto ret_shape_ptr = ret->shape(); in InferImplStack()
128 auto ret_shape = ret_shape_ptr->shape(); in InferImplStack()
[all …]
prim_maths.cc
80 void InferImplReduceFuncCalShape(ShapeVector *shape, const ShapeVector &x_shape, const ValuePtr &ax… in InferImplReduceFuncCalShape() argument
87 if (keep_dims_value) (void)shape->insert(shape->end(), x_shape.size(), 1); in InferImplReduceFuncCalShape()
89 (void)shape->insert(shape->end(), x_shape.begin(), x_shape.end()); in InferImplReduceFuncCalShape()
98 shape->at(LongToSize(axis_value)) = 1; in InferImplReduceFuncCalShape()
105 (void)shape->erase(shape->begin() + axis_value); in InferImplReduceFuncCalShape()
110 (void)shape->insert(shape->end(), x_shape.begin(), x_shape.end()); in InferImplReduceFuncCalShape()
114 shape->at(LongToSize(axis_value)) = 1; in InferImplReduceFuncCalShape()
116 (void)shape->erase(shape->begin() + axis_value); in InferImplReduceFuncCalShape()
145 ShapeVector shape = {}; in InferImplReduceFunc() local
146 ShapeVector x_shape = input_x->shape()->shape(); in InferImplReduceFunc()
[all …]
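The shape calculation above follows the usual keep-dims convention: reduced axes collapse to 1 when keep_dims is set and are erased otherwise. A minimal NumPy sketch of the same rule (NumPy standing in for the MindSpore inference code):

```python
import numpy as np

x = np.ones((2, 8, 32))

# keep_dims=True: the reduced axis collapses to 1, rank is preserved
assert np.sum(x, axis=1, keepdims=True).shape == (2, 1, 32)

# keep_dims=False: the reduced axis is erased from the shape
assert np.sum(x, axis=1).shape == (2, 32)

# no axis: everything is reduced; keep_dims yields an all-ones shape
assert np.sum(x, keepdims=True).shape == (1, 1, 1)
```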
utils.cc
56 // calculate dynamic shape in CalculateDynamicShape()
62 if (dims[i] != Shape::SHP_ANY) { in CalculateDynamicShape()
66 if (shape1->shape()[i] != Shape::SHP_ANY && shape2->shape()[i] != Shape::SHP_ANY) { in CalculateDynamicShape()
67 min_dims[i] = std::min(shape1->shape()[i], shape2->shape()[i]); in CalculateDynamicShape()
68 max_dims[i] = std::max(shape1->shape()[i], shape2->shape()[i]); in CalculateDynamicShape()
71 if (shape1->shape()[i] == Shape::SHP_ANY && shape2->shape()[i] != Shape::SHP_ANY) { in CalculateDynamicShape()
73 MS_EXCEPTION(ValueError) << "Shape " << shape1->ToString() in CalculateDynamicShape()
74 << " has dynamic shape, but does not have min/max shape info."; in CalculateDynamicShape()
76 min_dims[i] = std::min(shape1->min_shape()[i], shape2->shape()[i]); in CalculateDynamicShape()
77 max_dims[i] = std::max(shape1->max_shape()[i], shape2->shape()[i]); in CalculateDynamicShape()
[all …]
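A rough Python sketch of the per-dimension merge rule visible in this snippet, with -1 standing in for Shape::SHP_ANY; the both-dynamic branch is not shown in the excerpt and is assumed symmetric:

```python
SHP_ANY = -1  # stand-in for mindspore::abstract::Shape::SHP_ANY

def merge_dim(d1, d2, min1, max1, min2, max2):
    """Return (min, max) bounds for one merged, possibly dynamic, dimension."""
    if d1 != SHP_ANY and d2 != SHP_ANY:
        return min(d1, d2), max(d1, d2)      # both static
    if d1 == SHP_ANY and d2 != SHP_ANY:
        return min(min1, d2), max(max1, d2)  # d1 dynamic: needs min/max info
    if d2 == SHP_ANY and d1 != SHP_ANY:
        return min(d1, min2), max(d1, max2)
    return min(min1, min2), max(max1, max2)  # assumed: both dynamic
```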
/third_party/mindspore/tests/st/auto_monad/
test_effect_random.py
33 def __init__(self, shape, seed=0): argument
36 self.shape = shape
39 s1 = self.n1.sample(self.shape, mean, sd)
40 s2 = self.n1.sample(self.shape, mean, sd)
41 s3 = self.n1.sample(self.shape, mean, sd)
50 shape = (2, 3)
52 samp = Sampling(shape, seed=seed)
59 def __init__(self, shape=None, seed=0): argument
61 self.shape = shape
65 s1 = C.normal(self.shape, mean, stddev, self.seed)
[all …]
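The fixture pins a seed so the repeated sample() calls are reproducible across runs. A NumPy stand-in for the same pattern (numpy.random swapped in for the MindSpore distribution API):

```python
import numpy as np

shape, mean, sd = (2, 3), 0.0, 1.0
rng = np.random.default_rng(seed=0)

s1 = rng.normal(mean, sd, size=shape)
s2 = rng.normal(mean, sd, size=shape)
assert s1.shape == shape and s2.shape == shape

# re-seeding reproduces the whole sample sequence
rng2 = np.random.default_rng(seed=0)
assert np.array_equal(s1, rng2.normal(mean, sd, size=shape))
```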
/third_party/boost/boost/math/distributions/
pareto.hpp
64 RealType shape, in check_pareto_shape() argument
67 if((boost::math::isfinite)(shape)) in check_pareto_shape()
69 if (shape > 0) in check_pareto_shape()
77 "Shape parameter is %1%, but must be > 0!", shape, pol); in check_pareto_shape()
85 "Shape parameter is %1%, but must be finite!", shape, pol); in check_pareto_shape()
123 RealType shape, in check_pareto() argument
127 && check_pareto_shape(function, shape, result, pol); in check_pareto()
151 RealType shape()const in shape() function in boost::math::pareto_distribution
158 RealType m_shape; // distribution shape (k) or alpha
184 RealType shape = dist.shape(); in pdf() local
[all …]
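For reference, the density that pdf() evaluates for pareto_distribution, with shape k (the header's m_shape, also called alpha) and scale x_m; this is the standard Pareto form:

f(x; k, x_m) = \frac{k \, x_m^{k}}{x^{k+1}}, \qquad x \ge x_m,\; k > 0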
inverse_gamma.hpp
40 RealType shape, // shape aka alpha in check_inverse_gamma_shape() argument
43 { // Sources say shape argument must be > 0 in check_inverse_gamma_shape()
44 // but seems logical to allow shape zero as special case, in check_inverse_gamma_shape()
46 // (Functions like mean, variance with other limits on shape are checked in check_inverse_gamma_shape()
48 if((shape < 0) || !(boost::math::isfinite)(shape)) in check_inverse_gamma_shape()
52 "Shape parameter is %1%, but must be >= 0 !", shape, pol); in check_inverse_gamma_shape()
76 const char* function, // TODO swap these over, so shape is first. in check_inverse_gamma()
78 RealType shape, // shape aka alpha in check_inverse_gamma() argument
82 && check_inverse_gamma_shape(function, shape, result, pol); in check_inverse_gamma()
103 RealType shape()const in shape() function in boost::math::inverse_gamma_distribution
[all …]
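The corresponding standard inverse-gamma density, with shape \alpha and scale \beta, which the checks above guard (shape zero is tolerated only as the documented special case):

f(x; \alpha, \beta) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} \, x^{-\alpha - 1} e^{-\beta / x}, \qquad x > 0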
weibull.hpp
28 RealType shape, in check_weibull_shape() argument
31 if((shape <= 0) || !(boost::math::isfinite)(shape)) in check_weibull_shape()
35 "Shape parameter is %1%, but must be > 0 !", shape, pol); in check_weibull_shape()
61 RealType shape, in check_weibull() argument
64 …return check_scale(function, scale, result, pol) && check_weibull_shape(function, shape, result, p… in check_weibull()
83 RealType shape()const in shape() function in boost::math::weibull_distribution
96 RealType m_shape; // distribution shape
126 RealType shape = dist.shape(); in pdf() local
130 if(false == detail::check_weibull(function, scale, shape, &result, Policy())) in pdf()
137 if(shape == 1) in pdf()
[all …]
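The standard Weibull density with shape k and scale \lambda; the shape == 1 branch in pdf() is the exponential special case f(x) = e^{-x/\lambda}/\lambda:

f(x; k, \lambda) = \frac{k}{\lambda} \left(\frac{x}{\lambda}\right)^{k-1} e^{-(x/\lambda)^{k}}, \qquad x \ge 0,\; k > 0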
gamma.hpp
29 RealType shape, in check_gamma_shape() argument
32 if((shape <= 0) || !(boost::math::isfinite)(shape)) in check_gamma_shape()
36 "Shape parameter is %1%, but must be > 0 !", shape, pol); in check_gamma_shape()
62 RealType shape, in check_gamma() argument
65 …return check_scale(function, scale, result, pol) && check_gamma_shape(function, shape, result, pol… in check_gamma()
84 RealType shape()const in shape() function in boost::math::gamma_distribution
97 RealType m_shape; // distribution shape
126 RealType shape = dist.shape(); in pdf() local
130 if(false == detail::check_gamma(function, scale, shape, &result, Policy())) in pdf()
139 result = gamma_p_derivative(shape, x / scale, Policy()) / scale; in pdf()
[all …]
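The last line of the snippet uses the identity gamma_p_derivative(a, z) = z^{a-1} e^{-z} / \Gamma(a), so dividing by the scale \theta yields the standard Gamma(k, \theta) density:

f(x; k, \theta) = \frac{1}{\theta} \cdot \frac{(x/\theta)^{k-1} e^{-x/\theta}}{\Gamma(k)} = \frac{x^{k-1} e^{-x/\theta}}{\Gamma(k)\, \theta^{k}}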
/third_party/mindspore/mindspore/ccsrc/common/
trans.cc
30 using mindspore::abstract::Shape;
87 size_t GetShapeSize(const std::vector<size_t> &shape) { in GetShapeSize() argument
88 return std::accumulate(shape.begin(), shape.end(), size_t(1), std::multiplies<size_t>()); in GetShapeSize()
222 …std::any_of(shape_list.begin(), shape_list.end(), [](int64_t shape) { return shape == Shape::SHP_A… in HasShapeDynamic() argument
226 bool CheckDims(const std::vector<T> &shape) { in CheckDims() argument
227 if (shape.size() != kNchwDims) { in CheckDims()
228 MS_LOG(ERROR) << "Host shape dims should be 4"; in CheckDims()
234 std::vector<size_t> NchwDeviceShape(const std::vector<size_t> &shape) { in NchwDeviceShape() argument
235 if (!CheckDims(shape)) { in NchwDeviceShape()
238 return shape; in NchwDeviceShape()
[all …]
trans.h
78 std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const std::string &format,
81 std::vector<int64_t> TransShapeToDevice(const std::vector<int64_t> &shape, const std::string &forma…
85 std::vector<T> TransShapeToDevice(const std::vector<T> &shape, const std::string &format, const Anf…
96 MS_LOG(DEBUG) << "Start trans infer shape to device shape for node: " << node->DebugString()
100 return TransShapeToDevice(shape, format, groups, input_hidden_size);
138 std::vector<T> PaddingShapeTo5dDefault(const std::vector<T> &shape) { in PaddingShapeTo5dDefault() argument
139 if (shape.size() >= kNcdhw) { in PaddingShapeTo5dDefault()
140 return shape; in PaddingShapeTo5dDefault()
143 switch (shape.size()) { in PaddingShapeTo5dDefault()
147 shape_5d[C_ncdhw] = shape[N_ncdhw]; in PaddingShapeTo5dDefault()
[all …]
/third_party/mindspore/tests/st/ops/gpu/
test_reduce_mean_op.py
181 error0 = np.ones(shape=expect0.shape) * 1.0e-5
183 assert output[0].shape == expect0.shape
187 error1 = np.ones(shape=expect1.shape) * 1.0e-5
189 assert output[1].shape == expect1.shape
193 error2 = np.ones(shape=expect2.shape) * 1.0e-5
195 assert output[2].shape == expect2.shape
199 error3 = np.ones(shape=expect3.shape) * 1.0e-5
201 assert output[3].shape == expect3.shape
205 error4 = np.ones(shape=expect4.shape) * 1.0e-5
207 assert output[4].shape == expect4.shape
[all …]
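This error/assert idiom recurs in the test files below (test_realdiv_op.py, test_reduce_sum_op.py, test_mul_op.py, and the CPU test_arithmetic_op.py): build an all-ones tolerance tensor, require identical shapes, and require every elementwise difference to stay under it. A condensed sketch of the pattern:

```python
import numpy as np

def check_close(output, expect, tol=1.0e-5):
    """Shapes must match exactly; every element must differ by less than tol."""
    assert output.shape == expect.shape
    error = np.ones(shape=expect.shape) * tol
    assert np.all(np.abs(output - expect) < error)

check_close(np.float32([1.0, 2.0]), np.float32([1.0, 2.0]) + 1e-7)
# roughly equivalent to np.testing.assert_allclose(output, expect, rtol=0, atol=1.0e-5)
```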
test_realdiv_op.py
65 error0 = np.ones(shape=expect0.shape) * 1.0e-5
67 assert output0.shape == expect0.shape
72 error1 = np.ones(shape=expect1.shape) * 1.0e-5
74 assert output1.shape == expect1.shape
79 error2 = np.ones(shape=expect2.shape) * 1.0e-5
81 assert output2.shape == expect2.shape
86 error3 = np.ones(shape=expect3.shape) * 1.0e-5
88 assert output3.shape == expect3.shape
93 error4 = np.ones(shape=expect4.shape) * 1.0e-5
95 assert output4.shape == expect4.shape
[all …]
test_reduce_sum_op.py
183 error0 = np.ones(shape=expect0.shape) * 1.0e-5
185 assert output[0].shape == expect0.shape
189 error1 = np.ones(shape=expect1.shape) * 1.0e-5
191 assert output[1].shape == expect1.shape
195 error2 = np.ones(shape=expect2.shape) * 1.0e-5
197 assert output[2].shape == expect2.shape
201 error3 = np.ones(shape=expect3.shape) * 1.0e-5
203 assert output[3].shape == expect3.shape
207 error4 = np.ones(shape=expect4.shape) * 1.0e-5
209 assert output[4].shape == expect4.shape
[all …]
test_mul_op.py
62 error0 = np.ones(shape=expect0.shape) * 1.0e-5
64 assert output0.shape == expect0.shape
69 error1 = np.ones(shape=expect1.shape) * 1.0e-5
71 assert output1.shape == expect1.shape
76 error2 = np.ones(shape=expect2.shape) * 1.0e-5
78 assert output2.shape == expect2.shape
83 error3 = np.ones(shape=expect3.shape) * 1.0e-5
85 assert output3.shape == expect3.shape
90 error4 = np.ones(shape=expect4.shape) * 1.0e-5
92 assert output4.shape == expect4.shape
[all …]
/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/cl/
transpose.cl
21 …transpose_0312_NHWC4(__read_only image2d_t src_data, __write_only image2d_t dst_data, int4 shape) {
25 if (4 * X >= shape.y || Y >= shape.z || 4 * Z >= shape.w) {
28 int H4 = UP_DIV(shape.y, 4);
29 int C4 = UP_DIV(shape.w, 4);
32 if (4 * Z + 1 < shape.w) {
36 if (4 * Z + 2 < shape.w) {
40 if (4 * Z + 3 < shape.w) {
48 if (4 * X + 1 < shape.y) {
51 if (4 * X + 2 < shape.y) {
54 if (4 * X + 3 < shape.y) {
[all …]
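Reading "0312" as the destination-axis permutation, the kernel appears to reorder an NHWC tensor so that output axis i takes input axis (0, 3, 1, 2)[i]; that reading is an assumption from the name alone, and the kernel additionally handles the 4-element channel packing of image2d storage, which plain NumPy does not need. A sketch of that interpretation:

```python
import numpy as np

x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)  # N, H, W, C

y = np.transpose(x, (0, 3, 1, 2))  # assumed meaning of "0312": N, C, H, W
assert y.shape == (2, 5, 3, 4)
```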
/third_party/mindspore/tests/st/ops/cpu/
test_arithmetic_op.py
128 error0 = np.ones(shape=expect0.shape) * 1.0e-5
130 assert output0.shape == expect0.shape
135 error1 = np.ones(shape=expect1.shape) * 1.0e-5
137 assert output1.shape == expect1.shape
142 error2 = np.ones(shape=expect2.shape) * 1.0e-5
144 assert output2.shape == expect2.shape
149 error3 = np.ones(shape=expect3.shape) * 1.0e-5
151 assert output3.shape == expect3.shape
156 error4 = np.ones(shape=expect4.shape) * 1.0e-5
158 assert output4.shape == expect4.shape
[all …]
/third_party/boost/libs/python/test/numpy/
ndarray.py
18 for shape in ((60,),(6,10),(4,3,5),(2,2,3,5)):
19 a1 = ndarray_ext.zeros(shape,dt)
20 a2 = v.reshape(a1.shape)
21 self.assertEqual(shape,a1.shape)
27 shape = (6, 10)
28 a1 = ndarray_ext.zeros_matrix(shape, dt)
29 a2 = numpy.matrix(numpy.zeros(shape, dtype=dtp))
30 self.assertEqual(shape,a1.shape)
43 for shape in ((60,),(6,10),(4,3,5),(2,2,3,5)):
44 a1 = a1.reshape(shape)
[all …]
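All four shapes in the loop multiply out to 60 elements, so one flat buffer can be reshaped through each of them. A pure-NumPy rendering of the zeros/reshape round trip being asserted (ndarray_ext is the Boost.Python extension under test; np.zeros stands in for it):

```python
import numpy as np

v = np.arange(60)
for shape in ((60,), (6, 10), (4, 3, 5), (2, 2, 3, 5)):
    a1 = np.zeros(shape)        # stand-in for ndarray_ext.zeros(shape, dt)
    a2 = v.reshape(a1.shape)    # flat buffer reinterpreted with a1's shape
    assert shape == a1.shape == a2.shape
```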
/third_party/mindspore/mindspore/ops/composite/
random_ops.py
30 def normal(shape, mean, stddev, seed=None): argument
35 shape (tuple): The shape of random tensor to be generated.
45 … Tensor. The shape should be equal to the broadcasted shape between the input `shape` and shapes
53 >>> shape = (3, 1, 2)
56 >>> output = ops.normal(shape, mean, stddev, seed=5)
57 >>> result = output.shape
60 >>> shape = (3, 1, 3)
63 >>> output = ops.normal(shape, mean, stddev, seed=5)
64 >>> result = output.shape
67 >>> shape = (3, 1, 3)
[all …]
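The documented shape rule is ordinary broadcasting: the output shape is the broadcast of `shape` with the shapes of `mean` and `stddev`. The same rule expressed in NumPy (which requires mean and stddev to broadcast into `size`):

```python
import numpy as np

shape = (3, 1, 2)
mean = np.zeros((2,))   # broadcasts along the trailing axis
stddev = np.ones(())    # scalar broadcasts everywhere

out = np.random.default_rng(5).normal(mean, stddev, size=shape)
assert out.shape == (3, 1, 2)
```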
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/tensor_layout/
shape_util.h
32 * compute the accumulating product of all the values in shape from left to right,
35 …* given a shape = [d_n-1, d_n-2, ..., d_0](d_i > 0, i=0,1,...,n-1, elements of shape must be large…
39 * shape = [2, 8, 32]
43 Status ShapeToAccumulateProduct(const Shape &shape, Shape *shape_accum);
46 * compute the accumulating product of all the values in shape from right to left,
49 …* given a shape = [d_n-1, d_n-2, ..., d_0](d_i > 0, i=0,1,...,n-1, elements of shape must be large…
53 * shape = [2, 8, 32]
57 Status ShapeToAccumulateProductReverse(const Shape &shape, Shape *shape_accum);
60 * compute the original shape from the accumulating product shape_accum,
65 * then *shape = [accum_n-2/accum_n-1, accum_n-3/accum_n-2, ..., accum_0/accum_1]
[all …]
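A quick Python check of the two accumulations for the documented example shape = [2, 8, 32]; the expected outputs are inferred from the (truncated) comments, so treat them as an assumption:

```python
from itertools import accumulate
import operator

shape = [2, 8, 32]

# left-to-right running product, as in ShapeToAccumulateProduct
assert list(accumulate(shape, operator.mul)) == [2, 16, 512]

# right-to-left running product, as in ShapeToAccumulateProductReverse
assert list(accumulate(reversed(shape), operator.mul))[::-1] == [512, 256, 32]
```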
shape_util.cc
25 * shape = [2, 8, 32]
28 Status ShapeToAccumulateProduct(const Shape &shape, Shape *shape_accum) { in ShapeToAccumulateProduct() argument
32 for (auto iter = shape.begin(); iter < shape.end(); ++iter) { in ShapeToAccumulateProduct()
35 MS_LOG(ERROR) << "element of shape should not be zero"; in ShapeToAccumulateProduct()
45 * shape = [2, 8, 32]
49 Status ShapeToAccumulateProductReverse(const Shape &shape, Shape *shape_accum) { in ShapeToAccumulateProductReverse() argument
53 for (auto iter = shape.end() - 1; iter >= shape.begin(); --iter) { in ShapeToAccumulateProductReverse()
56 MS_LOG(ERROR) << "element of shape should not be zero"; in ShapeToAccumulateProductReverse()
67 * shape = [2, 8, 32]
70 Status AccumulateProductToShape(const Shape &shape_accum, Shape *shape) { in AccumulateProductToShape() argument
[all …]
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/
gpu_kernel.h
39 // The max_limit of tensor shape size: 2 Giga-elements(2^31, the largest number in 32 bits).
84 MS_LOG(ERROR) << "kernel must override the `ResetResource()` method when dynamic shape"; in ResetResource()
146 // expand Nd Shape to 4d (N in [0,4])
169 // transpose shape: NCHW To NHWC
170 void ShapeNCHW2NHWC(std::vector<size_t> *shape) { in ShapeNCHW2NHWC() argument
171 std::swap((*shape)[1], (*shape)[3]); in ShapeNCHW2NHWC()
172 std::swap((*shape)[2], (*shape)[1]); in ShapeNCHW2NHWC()
175 // transpose shape: NCDHW To NDHWC
176 void ShapeNCDHW2NDHWC(std::vector<size_t> *shape) { in ShapeNCDHW2NDHWC() argument
177 std::swap((*shape)[1], (*shape)[2]); in ShapeNCDHW2NDHWC()
[all …]
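The two std::swap calls in ShapeNCHW2NHWC compose into the NCHW → NHWC permutation in place. Tracing them on labeled axes in Python:

```python
shape = ["N", "C", "H", "W"]

# swap((*shape)[1], (*shape)[3])  ->  N W H C
shape[1], shape[3] = shape[3], shape[1]
# swap((*shape)[2], (*shape)[1])  ->  N H W C
shape[2], shape[1] = shape[1], shape[2]

assert shape == ["N", "H", "W", "C"]
```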
/third_party/mindspore/mindspore/ops/operations/
random_ops.py
27 … Returns the tensor with the given shape, the random numbers in it drawn from normal distributions
38 … - **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
41 Tensor. The shape is the same as the input `shape`. The dtype is float32.
45 TypeError: If `shape` is not a tuple.
46 ValueError: If `shape` is not a constant value.
52 >>> shape = (3, 4)
54 >>> output = stdnormal(shape)
64 self.init_prim_io_names(inputs=['shape'], outputs=['output'])
69 def __infer__(self, shape): argument
70 shape_v = shape["value"]
[all …]
