/external/eigen/Eigen/src/Geometry/

Transform.h
     24  Dim = Transform::Dim,    enumerator
     42  int Dim,
     58  int Dim,
    208  Dim = _Dim, ///< space dimension in which the transformation holds
    210  Rows = int(Mode)==(AffineCompact) ? Dim : HDim
    221  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
    223  typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
    225  …typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> C…
    229  Block<MatrixType,Dim,HDim> >::type AffinePart;
    233  const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
    [all …]
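A note on the Transform.h matches above (a minimal sketch of my own, not code from the file): Dim is the space dimension and HDim = Dim + 1, so an Affine-mode transform stores an HDim x HDim matrix, AffineCompact stores only Dim x HDim, and LinearPart is the Dim x Dim block.

#include <Eigen/Geometry>
#include <iostream>

int main() {
  // Dim = 2, HDim = 3: full Affine mode keeps the (Dim+1) x (Dim+1) matrix.
  Eigen::Transform<float, 2, Eigen::Affine> affine2d;
  // AffineCompact drops the constant last row and stores only Dim x HDim.
  Eigen::Transform<float, 2, Eigen::AffineCompact> compact2d;
  affine2d.setIdentity();
  compact2d.setIdentity();
  std::cout << affine2d.matrix().rows() << "x" << affine2d.matrix().cols() << "\n";    // 3x3
  std::cout << compact2d.matrix().rows() << "x" << compact2d.matrix().cols() << "\n";  // 2x3
  std::cout << affine2d.linear().rows() << "x" << affine2d.linear().cols() << "\n";    // 2x2 (LinearPart)
  return 0;
}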
RotationBase.h
     32  enum { Dim = _Dim };    enumerator
     37  typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
     38  typedef Matrix<Scalar,Dim,1> VectorType;
     56  …EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t…
     57  { return Transform<Scalar,Dim,Isometry>(*this) * t; }
     80  …EIGEN_DEVICE_FUNC friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar…
     82  Transform<Scalar,Dim,Affine> res(r);
     89  …EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Opti…
    103  enum { Dim = RotationDerived::Dim };
    104  typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType;
    [all …]
Translation.h
     35  enum { Dim = _Dim };    enumerator
     39  typedef Matrix<Scalar,Dim,1> VectorType;
     41  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
     43  typedef Transform<Scalar,Dim,Affine> AffineTransformType;
     45  typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;
     58  eigen_assert(Dim==2);    in Translation()
     65  eigen_assert(Dim==3);    in Translation()
    106  EIGEN_DEVICE_FUNC inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const
    118  res.matrix().row(Dim).setZero();
    119  res(Dim,Dim) = Scalar(1);
    [all …]
Scaling.h
     58  template<int Dim>
     59  inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
     62  template<int Dim, int Mode, int Options>
     63  …inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Sca…
     65  Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
     76  template<typename Derived,int Dim>
     77  inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
    156  template<int Dim>
    157  inline Transform<Scalar,Dim,Affine>
    158  UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
    [all …]
Homogeneous.h
     96  template<typename Scalar, int Dim, int Mode, int Options> friend
     97  EIGEN_DEVICE_FUNC inline const Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous >
     98  operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)
    101  return Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous>(lhs,rhs);
    222  template<typename Scalar, int Dim, int Mode,int Options>
    223  struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >
    225  typedef Transform<Scalar, Dim, Mode, Options> TransformType;
    230  template<typename Scalar, int Dim, int Options>
    231  struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >
    233  typedef Transform<Scalar, Dim, Projective, Options> TransformType;
    [all …]
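The operator* overloads matched above in RotationBase.h, Translation.h and Scaling.h are what let mixed products collapse into a Transform of the right Mode. A hedged sketch of that composition (standard Eigen usage, not code from these files):

#include <Eigen/Geometry>
#include <iostream>

int main() {
  using namespace Eigen;
  // Translation * rotation * uniform scaling -> Affine transform (Dim = 3, 4x4 matrix).
  Transform<double, 3, Affine> t =
      Translation3d(1.0, 2.0, 3.0) *
      AngleAxisd(0.25 * EIGEN_PI, Vector3d::UnitZ()) *
      Scaling(2.0);
  // Rotation * translation alone stays an Isometry, matching the
  // Transform<Scalar,Dim,Isometry> return type at RotationBase.h line 56.
  Transform<double, 3, Isometry> iso =
      AngleAxisd(0.1, Vector3d::UnitX()) * Translation3d(0.0, 1.0, 0.0);
  std::cout << t.matrix() << "\n\n" << iso.matrix() << "\n";
  return 0;
}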
/external/eigen/unsupported/test/

BVH.cpp
     17  …e<typename Scalar, int Dim> AlignedBox<Scalar, Dim> bounding_box(const Matrix<Scalar, Dim, 1> &v) …    in bounding_box() argument
     22  template<int Dim>
     25  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim)
     27  typedef Matrix<double, Dim, 1> VectorType;
     35  template<int Dim> AlignedBox<double, Dim> bounding_box(const Ball<Dim> &b)    in bounding_box()
     36  { return AlignedBox<double, Dim>(b.center.array() - b.radius, b.center.array() + b.radius); }    in bounding_box()
     40  template<int Dim>
     44  typedef Matrix<double, Dim, 1> VectorType;
     45  typedef Ball<Dim> BallType;
     46  typedef AlignedBox<double, Dim> BoxType;
    [all …]
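The BVH.cpp matches show the convention the BVH module relies on: every object type stored in a tree needs a free bounding_box() overload returning AlignedBox<Scalar, Dim>. A minimal sketch for a hypothetical Sphere type, modeled on the Ball<Dim> overload above:

#include <Eigen/Geometry>

template<int Dim>
struct Sphere {
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim)
  typedef Eigen::Matrix<double, Dim, 1> VectorType;
  VectorType center;
  double radius;
};

// Found by argument-dependent lookup, exactly like bounding_box(const Ball<Dim>&)
// above: the box spans center - radius .. center + radius on every axis.
template<int Dim>
Eigen::AlignedBox<double, Dim> bounding_box(const Sphere<Dim>& s) {
  return Eigen::AlignedBox<double, Dim>(s.center.array() - s.radius,
                                        s.center.array() + s.radius);
}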
/external/llvm/lib/Target/AMDGPU/

SIMachineFunctionInfo.h
    361  int getDebuggerWorkGroupIDStackObjectIndex(unsigned Dim) const {    in getDebuggerWorkGroupIDStackObjectIndex() argument
    362  assert(Dim < 3);    in getDebuggerWorkGroupIDStackObjectIndex()
    363  return DebuggerWorkGroupIDStackObjectIndices[Dim];    in getDebuggerWorkGroupIDStackObjectIndex()
    367  void setDebuggerWorkGroupIDStackObjectIndex(unsigned Dim, int ObjectIdx) {    in setDebuggerWorkGroupIDStackObjectIndex() argument
    368  assert(Dim < 3);    in setDebuggerWorkGroupIDStackObjectIndex()
    369  DebuggerWorkGroupIDStackObjectIndices[Dim] = ObjectIdx;    in setDebuggerWorkGroupIDStackObjectIndex()
    373  int getDebuggerWorkItemIDStackObjectIndex(unsigned Dim) const {    in getDebuggerWorkItemIDStackObjectIndex() argument
    374  assert(Dim < 3);    in getDebuggerWorkItemIDStackObjectIndex()
    375  return DebuggerWorkItemIDStackObjectIndices[Dim];    in getDebuggerWorkItemIDStackObjectIndex()
    379  void setDebuggerWorkItemIDStackObjectIndex(unsigned Dim, int ObjectIdx) {    in setDebuggerWorkItemIDStackObjectIndex() argument
    [all …]
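For readers scanning these matches: Dim here is just the work-group/work-item axis (0 = x, 1 = y, 2 = z), bounds-checked with assert(Dim < 3) before indexing a three-element array of frame indices. A generic, illustrative sketch of that pattern (not the LLVM class itself):

#include <cassert>

class PerDimFrameIndexes {          // illustrative stand-in, not SIMachineFunctionInfo
  int Indices[3] = {-1, -1, -1};    // one stack-object index per dimension x, y, z

public:
  int get(unsigned Dim) const {
    assert(Dim < 3);
    return Indices[Dim];
  }
  void set(unsigned Dim, int ObjectIdx) {
    assert(Dim < 3);
    Indices[Dim] = ObjectIdx;
  }
};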
/external/tensorflow/tensorflow/core/ops/

image_ops.cc
     36  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 2, &unused));    in SetOutputToSizedImage()
     65  return SetOutputToSizedImage(c, c->Dim(input, 0), 1 /* size_input_idx */,    in ResizeShapeFn()
     66  c->Dim(input, 3));    in ResizeShapeFn()
    103  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(input, -1), 3, &last_dim));    in ColorspaceShapeFn()
    126  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused));    in NMSShapeFn()
    128  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 1), 4, &unused));    in NMSShapeFn()
    151  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused));    in SoftNMSShapeFn()
    153  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 1), 4, &unused));    in SoftNMSShapeFn()
    178  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused));    in CombinedNMSShapeFn()
    180  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 1), c->Dim(scores, 1), &unused));    in CombinedNMSShapeFn()
    [all …]
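The image_ops.cc matches are all instances of one pattern: a shape function receives an InferenceContext, pulls individual dimensions out of input shapes with c->Dim(), pins constants with c->WithValue(), and equates dimensions with c->Merge(). A hedged sketch of that pattern for a hypothetical op (the op name and signature here are illustrative, not TensorFlow's):

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"

namespace tensorflow {

using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;

REGISTER_OP("ExampleResizeLike")  // hypothetical op, for illustration only
    .Input("images: float")       // [batch, height, width, channels]
    .Input("size: int32")         // [2] = new (height, width)
    .Output("resized: float")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle images, size;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &images));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &size));
      // The size vector must hold exactly two elements, as in SetOutputToSizedImage.
      DimensionHandle unused;
      TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 2, &unused));
      // Keep batch and channels; the new spatial extents are unknown statically.
      c->set_output(0, c->MakeShape({c->Dim(images, 0), c->UnknownDim(),
                                     c->UnknownDim(), c->Dim(images, 3)}));
      return Status::OK();
    });

}  // namespace tensorflow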
ctc_ops.cc
     50  TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0),    in __anoned3a12980102()
     51  c->Dim(labels_values, 0), &unused));    in __anoned3a12980102()
     57  c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));    in __anoned3a12980102()
     87  TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0),    in __anoned3a12980202()
     88  c->Dim(labels_values, 0), &unused));    in __anoned3a12980202()
     94  c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));    in __anoned3a12980202()
    121  c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));    in __anoned3a12980302()
    152  c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));    in __anoned3a12980402()
linalg_ops.cc
     35  TF_RETURN_IF_ERROR(c->Merge(c->Dim(s, -2), c->Dim(s, -1), &d));    in MakeBatchSquareMatrix()
     73  TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -2), c->Dim(rhs, -2), &m));    in MatrixSolveShapeFn()
     74  DimensionHandle n = c->Dim(lhs, -1);    in MatrixSolveShapeFn()
     82  TF_RETURN_IF_ERROR(c->Concatenate(out, c->Vector(c->Dim(rhs, -1)), &out));    in MatrixSolveShapeFn()
    105  TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -1), c->Dim(rhs, -2), &m));    in MatrixTriangularSolveShapeFn()
    110  c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out));    in MatrixTriangularSolveShapeFn()
    122  TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n));    in SelfAdjointEigV2ShapeFn()
    148  TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n));    in LuShapeFn()
    172  DimensionHandle m = c->Dim(input, -2);    in QrShapeFn()
    173  DimensionHandle n = c->Dim(input, -1);    in QrShapeFn()
    [all …]
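Worth noting from the linalg_ops.cc matches: c->Dim() accepts negative indices, so Dim(s, -1) is the last dimension and Dim(s, -2) the one before it, which is how these batched ops reach the trailing matrix dimensions without knowing the batch rank. A small sketch of the square-matrix check used by MakeBatchSquareMatrix, SelfAdjointEigV2ShapeFn and LuShapeFn (the helper name is mine, not TensorFlow's):

#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;

// Hypothetical helper, not part of linalg_ops.cc: verify the two innermost
// dimensions of a (possibly batched) tensor agree, returning the merged size.
Status CheckSquareInnerMatrix(InferenceContext* c, ShapeHandle s,
                              DimensionHandle* n_out) {
  ShapeHandle checked;
  // Need at least rank 2 so the trailing matrix dimensions exist.
  TF_RETURN_IF_ERROR(c->WithRankAtLeast(s, 2, &checked));
  // Merge fails unless the dimensions agree (unknown dims merge with anything).
  return c->Merge(c->Dim(checked, -2), c->Dim(checked, -1), n_out);
}

}  // namespace tensorflow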
boosted_trees_ops.cc
    330  TF_RETURN_IF_ERROR(c->Merge(c->Dim(node_ids_shape, 0),    in __anon9fefc8f00902()
    331  c->Dim(gradients_shape, 0), &unused_dim));    in __anon9fefc8f00902()
    337  TF_RETURN_IF_ERROR(c->Merge(c->Dim(node_ids_shape, 0),    in __anon9fefc8f00902()
    338  c->Dim(bucketized_feature_shape, 0),    in __anon9fefc8f00902()
    368  shape_inference::DimensionHandle batch_size = c->Dim(c->input(0), 0);    in __anon9fefc8f00a02()
    376  TF_RETURN_IF_ERROR(c->Merge(c->Dim(gradients_shape, 0),    in __anon9fefc8f00a02()
    377  c->Dim(node_ids_shape, 0), &batch_size));    in __anon9fefc8f00a02()
    378  TF_RETURN_IF_ERROR(c->Merge(c->Dim(hessians_shape, 0),    in __anon9fefc8f00a02()
    379  c->Dim(node_ids_shape, 0), &batch_size));    in __anon9fefc8f00a02()
    380  TF_RETURN_IF_ERROR(c->Merge(c->Dim(feature_shape, 0),    in __anon9fefc8f00a02()
    [all …]
sparse_csr_matrix_ops.cc
     54  TF_RETURN_IF_ERROR(c->Merge(c->Dim(matrix_shape, -2),    in ValidateSquareMatrixShape()
     55  c->Dim(matrix_shape, -1), matrix_dimension));    in ValidateSquareMatrixShape()
     68  auto rank = c->Value(c->Dim(c->input(0), 1));    in __anond96a6a670102()
    132  auto indices_col = c->Dim(indices, 1);    in __anond96a6a670302()
    138  ShapeHandle fake_values_vec = c->Vector(c->Dim(indices, 0));    in __anond96a6a670302()
    190  auto row_ptrs_dh = c->Dim(csr_sparse_matrix, -2);    in __anond96a6a670502()
    213  out = c->Vector(c->Dim(sparse_matrix, 0));    in __anond96a6a670602()
    269  auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);    in __anond96a6a670702()
    270  auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);    in __anond96a6a670702()
    286  TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),    in __anond96a6a670702()
    [all …]
rnn_ops.cc
     42  DimensionHandle batch_size = c->Dim(x, 0);    in __anon95c2132c0102()
     43  DimensionHandle cell_size = c->Dim(h_prev, 1);    in __anon95c2132c0102()
     73  DimensionHandle batch_size = c->Dim(x, 0);    in __anon95c2132c0202()
     74  DimensionHandle cell_size = c->Dim(h_prev, 1);    in __anon95c2132c0202()
     75  DimensionHandle twice_cell_size = c->Dim(w_ru, 1);    in __anon95c2132c0202()
    110  DimensionHandle batch_size = c->Dim(x, 0);    in __anon95c2132c0302()
    111  DimensionHandle cell_size = c->Dim(cs_prev, 1);    in __anon95c2132c0302()
    148  DimensionHandle batch_size = c->Dim(x, 0);    in __anon95c2132c0402()
    149  DimensionHandle cell_size = c->Dim(cs_prev, 1);    in __anon95c2132c0402()
    188  DimensionHandle timelen = c->Dim(x, 0);    in __anon95c2132c0502()
    [all …]
sparse_ops.cc
     57  c->set_output(0, c->Vector(c->Dim(a_indices, 0)));    in __anon5983aeac0202()
     58  c->set_output(1, c->Vector(c->Dim(b_indices, 0)));    in __anon5983aeac0202()
     79  0, c->Matrix(InferenceContext::kUnknownDim, c->Dim(a_shape, 0)));    in __anon5983aeac0302()
    111  DimensionHandle output_right = c->Dim(b, adjoint_b ? 0 : 1);    in __anon5983aeac0402()
    112  DimensionHandle output_left = c->Dim(a_shape, adjoint_a ? 1 : 0);    in __anon5983aeac0402()
    113  DimensionHandle inner_left = c->Dim(a_shape, adjoint_a ? 0 : 1);    in __anon5983aeac0402()
    114  DimensionHandle inner_right = c->Dim(b, adjoint_b ? 1 : 0);    in __anon5983aeac0402()
    162  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), -1), 3, &unused));    in __anon5983aeac0702()
    182  c->WithValue(c->Dim(serialized_sparse, 1), 3, &unused));    in __anon5983aeac0802()
    236  TF_RETURN_IF_ERROR(c->Merge(c->Dim(ind, 0), c->Dim(val, 0), &num_dim));    in __anon5983aeac0a02()
    [all …]
array_ops.cc
     74  TF_RETURN_IF_ERROR(c->Add(c->Dim(input, i), pad0 + pad1, &dims[i]));    in PadKnown()
     85  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(paddings, 1), 2, &unused));    in PadShapeFn()
     89  DimensionHandle n_dim = c->Dim(paddings, 0);    in PadShapeFn()
    174  dims[i] = c->Dim(input, in_idx);    in TransposeShapeFn()
    207  DimensionHandle dim = c->Dim(out, i);    in SetOutputShapeForReshape()
    225  DimensionHandle dim = c->Dim(in, i);    in SetOutputShapeForReshape()
    264  DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx);    in SetOutputShapeForReshape()
    271  DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx);    in SetOutputShapeForReshape()
    310  if (!c->WithValue(c->Dim(c->input(i), 0), 1, &unused).ok()) {    in __anonf6523ebd0202()
    351  while (index < axis) dims.push_back(c->Dim(cur, index++));    in __anonf6523ebd0302()
    [all …]
lookup_ops.cc
     37  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));    in TwoElementVectorInputsAndScalarOutputs()
     51  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));    in ScalarAndTwoElementVectorInputsAndScalarOutputs()
     81  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));    in __anon3b8c08180202()
    134  DimensionHandle dim = c->Dim(key_shape_and_type.shape, d);    in ValidateTableResourceHandle()
    141  keys_prefix_vec.push_back(c->Dim(keys, d));    in ValidateTableResourceHandle()
    197  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));    in __anon3b8c08180402()
    252  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));    in __anon3b8c08180702()
    256  ShapeHandle keys = c->Vector(c->Dim(values, 0));    in __anon3b8c08180702()
    294  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));    in __anon3b8c08180902()
    452  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));    in __anon3b8c08180e02()
    [all …]
cudnn_rnn_ops.cc
     86  auto seq_length = c->Dim(input_shape, 0);    in __anon4fa58c430302()
     87  auto batch_size = c->Dim(input_shape, 1);    in __anon4fa58c430302()
     88  auto num_units = c->Dim(input_h_shape, 2);    in __anon4fa58c430302()
    129  auto seq_length = c->Dim(input_shape, 0);    in __anon4fa58c430402()
    130  auto batch_size = c->Dim(input_shape, 1);    in __anon4fa58c430402()
    131  auto num_units = c->Dim(input_h_shape, 2);    in __anon4fa58c430402()
    177  auto max_seq_length = c->Dim(input_shape, 0);    in __anon4fa58c430502()
    178  auto batch_size = c->Dim(input_shape, 1);    in __anon4fa58c430502()
    179  auto num_units = c->Dim(input_h_shape, 2);    in __anon4fa58c430502()
/external/tensorflow/tensorflow/core/framework/

common_shape_fns.cc
    232  DimensionHandle output_rows = transpose_a ? c->Dim(a, 1) : c->Dim(a, 0);    in MatMulShape()
    233  DimensionHandle output_cols = transpose_b ? c->Dim(b, 0) : c->Dim(b, 1);    in MatMulShape()
    236  DimensionHandle inner_a = transpose_a ? c->Dim(a, 0) : c->Dim(a, 1);    in MatMulShape()
    237  DimensionHandle inner_b = transpose_b ? c->Dim(b, 1) : c->Dim(b, 0);    in MatMulShape()
    346  ? c->Dim(input_shape, axis)    in EinsumShape()
    401  output_dims.push_back(c->Dim(output_bcast_shape, k));    in EinsumShape()
    429  DimensionHandle output_rows = c->Dim(a_shape, adj_x ? -1 : -2);    in BatchMatMulV2Shape()
    430  DimensionHandle output_cols = c->Dim(b_shape, adj_y ? -2 : -1);    in BatchMatMulV2Shape()
    434  TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, adj_x ? -2 : -1),    in BatchMatMulV2Shape()
    435  c->Dim(b_shape, adj_y ? -1 : -2), &inner_merged));    in BatchMatMulV2Shape()
    [all …]
shape_inference_test.cc
    283  EXPECT_EQ("?", c.DebugString(c.Dim(in0, 0)));    in TEST_F()
    284  EXPECT_EQ("?", c.DebugString(c.Dim(in0, -1)));    in TEST_F()
    285  EXPECT_EQ("?", c.DebugString(c.Dim(in0, 1000)));    in TEST_F()
    291  auto d = c.Dim(in1, 0);    in TEST_F()
    293  EXPECT_TRUE(SameHandle(d, c.Dim(in1, -3)));    in TEST_F()
    296  d = c.Dim(in1, 1);    in TEST_F()
    299  EXPECT_TRUE(SameHandle(d, c.Dim(in1, -2)));    in TEST_F()
    301  d = c.Dim(in1, 2);    in TEST_F()
    303  EXPECT_TRUE(SameHandle(d, c.Dim(in1, -1)));    in TEST_F()
    322  EXPECT_FALSE(SameHandle(c.Dim(c.input(1), 1), c.NumElements(c.input(1))));    in TEST_F()
    [all …]
/external/eigen/bench/

geometry.cpp
     43  enum {Dim = T::Dim};    enumerator
     57  template<typename Scalar, int Dim, typename Data>
     58  EIGEN_DONT_INLINE void transform(const Transform<Scalar,Dim,Projective>& t, Data& data)    in transform() argument
     60  data = (t * data.colwise().homogeneous()).template block<Dim,Data::ColsAtCompileTime>(0,0);    in transform()
     63  template<typename T> struct get_dim { enum { Dim = T::Dim }; };    enumerator
     65  struct get_dim<Matrix<S,R,C,O,MR,MC> > { enum { Dim = R }; };    enumerator
     72  Matrix<typename Transformation::Scalar,get_dim<Transformation>::Dim,N> data;    in run()
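The get_dim trait matched in geometry.cpp is a small but handy idiom: read Dim from types that expose it (such as Transform) and fall back to the compile-time row count for plain matrices. A self-contained sketch of the same trait:

#include <Eigen/Geometry>

template<typename T> struct get_dim { enum { Dim = T::Dim }; };

// Plain matrices have no Dim member, so recover it from the compile-time row count.
template<typename S, int R, int C, int O, int MR, int MC>
struct get_dim<Eigen::Matrix<S, R, C, O, MR, MC> > { enum { Dim = R }; };

static_assert(get_dim<Eigen::Transform<float, 3, Eigen::Affine> >::Dim == 3,
              "Transform exposes Dim directly");
static_assert(get_dim<Eigen::Matrix3f>::Dim == 3,
              "matrices fall back to the row count");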
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/

SIMachineFunctionInfo.h
    569  int getDebuggerWorkGroupIDStackObjectIndex(unsigned Dim) const {    in getDebuggerWorkGroupIDStackObjectIndex() argument
    570  assert(Dim < 3);    in getDebuggerWorkGroupIDStackObjectIndex()
    571  return DebuggerWorkGroupIDStackObjectIndices[Dim];    in getDebuggerWorkGroupIDStackObjectIndex()
    575  void setDebuggerWorkGroupIDStackObjectIndex(unsigned Dim, int ObjectIdx) {    in setDebuggerWorkGroupIDStackObjectIndex() argument
    576  assert(Dim < 3);    in setDebuggerWorkGroupIDStackObjectIndex()
    577  DebuggerWorkGroupIDStackObjectIndices[Dim] = ObjectIdx;    in setDebuggerWorkGroupIDStackObjectIndex()
    581  int getDebuggerWorkItemIDStackObjectIndex(unsigned Dim) const {    in getDebuggerWorkItemIDStackObjectIndex() argument
    582  assert(Dim < 3);    in getDebuggerWorkItemIDStackObjectIndex()
    583  return DebuggerWorkItemIDStackObjectIndices[Dim];    in getDebuggerWorkItemIDStackObjectIndex()
    587  void setDebuggerWorkItemIDStackObjectIndex(unsigned Dim, int ObjectIdx) {    in setDebuggerWorkItemIDStackObjectIndex() argument
    [all …]
/external/mesa3d/src/amd/addrlib/src/core/

coord.h
     37  enum Dim    enum
     51  Coordinate(enum Dim dim, INT_32 n);
     53  VOID set(enum Dim dim, INT_32 n);
     55  enum Dim getdim();
     67  enum Dim dim;
     85  UINT_32 Filter(INT_8 f, Coordinate& co, UINT_32 start = 0, enum Dim axis = NUM_DIMS);
    112  UINT_32 Filter(INT_8 f, Coordinate& co, UINT_32 start = 0, enum Dim axis = NUM_DIMS);
/external/eigen/unsupported/Eigen/src/BVH/

KdBVH.h
     18  template<typename Scalar, int Dim>
     21  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Dim)
     22  typedef Matrix<Scalar, Dim, 1> VectorType;
     70  enum { Dim = _Dim };
     74  typedef AlignedBox<Scalar, Dim> Volume;
    170  typedef internal::vector_int_pair<Scalar, Dim> VIPair;
    172  typedef Matrix<Scalar, Dim, 1> VectorType;
    195  build(objCenters, from, mid, objBoxes, (dim + 1) % Dim);
    205  build(objCenters, from, mid, objBoxes, (dim + 1) % Dim);
    207  build(objCenters, mid, to, objBoxes, (dim + 1) % Dim);
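The KdBVH.h matches show Dim flowing into the tree's Volume type (AlignedBox<Scalar, Dim>) and into the axis-cycling recursion ((dim + 1) % Dim) in build(). A hedged usage sketch, following the unsupported BVH module's convention that bounding_box() must be defined for the stored object type (here plain 3D points, so the boxes are degenerate); exact constructor details may vary by Eigen version:

#include <unsupported/Eigen/BVH>
#include <vector>

namespace Eigen {
// Required by the BVH module: a bounding box for each stored object.
AlignedBox3d bounding_box(const Vector3d& v) { return AlignedBox3d(v, v); }
}

int main() {
  std::vector<Eigen::Vector3d> pts(100, Eigen::Vector3d::Zero());
  // Dim = 3: KdBVH<Scalar, Dim, Object> partitions space cycling through the
  // three axes, as the (dim + 1) % Dim recursion in build() shows.
  Eigen::KdBVH<double, 3, Eigen::Vector3d> tree(pts.begin(), pts.end());
  return 0;
}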
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/

TensorSyclLeafCount.h
     97  template <typename OP, typename Dim, typename Expr>
     98  struct LeafCount<const TensorReductionOp<OP, Dim, Expr> > {
    103  template <typename OP, typename Dim, typename Expr>
    104  struct LeafCount<TensorReductionOp<OP, Dim, Expr> >: LeafCount<const TensorReductionOp<OP, Dim, Exp…
/external/tensorflow/tensorflow/go/

signature_test.go
     37  Dim: []*tspb.TensorShapeProto_Dim{
     50  Dim: []*tspb.TensorShapeProto_Dim{
     65  Dim: []*tspb.TensorShapeProto_Dim{
     78  Dim: []*tspb.TensorShapeProto_Dim{
    149  Dim: []*tspb.TensorShapeProto_Dim{