/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Support/

MachineValueType.h
  827  static MVT getVectorVT(MVT VT, unsigned NumElements) {    in getVectorVT() argument
  832  if (NumElements == 1) return MVT::v1i1;    in getVectorVT()
  833  if (NumElements == 2) return MVT::v2i1;    in getVectorVT()
  834  if (NumElements == 4) return MVT::v4i1;    in getVectorVT()
  835  if (NumElements == 8) return MVT::v8i1;    in getVectorVT()
  836  if (NumElements == 16) return MVT::v16i1;    in getVectorVT()
  837  if (NumElements == 32) return MVT::v32i1;    in getVectorVT()
  838  if (NumElements == 64) return MVT::v64i1;    in getVectorVT()
  839  if (NumElements == 128) return MVT::v128i1;    in getVectorVT()
  840  if (NumElements == 512) return MVT::v512i1;    in getVectorVT()
  [all …]
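For orientation, MVT::getVectorVT maps a scalar MVT plus an element count onto the matching fixed vector enumerator, as the i1 cases above show. A minimal usage sketch (the helper name and the assert are illustrative, not taken from the file above; it assumes a supported element count):

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Build the MVT for an N-element i1 mask, e.g. N == 4 yields MVT::v4i1.
    static llvm::MVT makeMaskVT(unsigned NumElements) {
      llvm::MVT VT = llvm::MVT::getVectorVT(llvm::MVT::i1, NumElements);
      assert(VT.isVector() && VT.getVectorNumElements() == NumElements);
      return VT;
    }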
LowLevelTypeImpl.h
  58   static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {    in vector() argument
  59   assert(NumElements > 1 && "invalid number of vector elements");    in vector()
  61   return LLT{/*isPointer=*/false, /*isVector=*/true, NumElements,    in vector()
  66   static LLT vector(uint16_t NumElements, LLT ScalarTy) {    in vector() argument
  67   assert(NumElements > 1 && "invalid number of vector elements");    in vector()
  69   return LLT{ScalarTy.isPointer(), /*isVector=*/true, NumElements,    in vector()
  74   explicit LLT(bool isPointer, bool isVector, uint16_t NumElements,    in LLT() argument
  76   init(isPointer, isVector, NumElements, SizeInBits, AddressSpace);    in LLT()
  212  void init(bool IsPointer, bool IsVector, uint16_t NumElements,
  223  assert(NumElements > 1 && "invalid number of vector elements");
  [all …]
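Because both vector() overloads assert NumElements > 1, callers have to collapse a would-be single-element vector to its scalar type themselves (the LowLevelType.cpp excerpt further down does exactly that). A small sketch of that calling convention, with an illustrative helper name:

    #include "llvm/Support/LowLevelTypeImpl.h"
    #include <cstdint>

    // Collapse <1 x sN> to sN before calling LLT::vector(), which rejects
    // single-element vectors via the asserts shown above.
    static llvm::LLT makeVectorOrScalar(uint16_t NumElements, unsigned ScalarSizeInBits) {
      llvm::LLT Scalar = llvm::LLT::scalar(ScalarSizeInBits);
      return NumElements > 1 ? llvm::LLT::vector(NumElements, Scalar) : Scalar;
    }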
/external/llvm/include/llvm/CodeGen/

MachineValueType.h
  588  static MVT getVectorVT(MVT VT, unsigned NumElements) {    in getVectorVT() argument
  593  if (NumElements == 2) return MVT::v2i1;    in getVectorVT()
  594  if (NumElements == 4) return MVT::v4i1;    in getVectorVT()
  595  if (NumElements == 8) return MVT::v8i1;    in getVectorVT()
  596  if (NumElements == 16) return MVT::v16i1;    in getVectorVT()
  597  if (NumElements == 32) return MVT::v32i1;    in getVectorVT()
  598  if (NumElements == 64) return MVT::v64i1;    in getVectorVT()
  599  if (NumElements == 512) return MVT::v512i1;    in getVectorVT()
  600  if (NumElements == 1024) return MVT::v1024i1;    in getVectorVT()
  603  if (NumElements == 1) return MVT::v1i8;    in getVectorVT()
  [all …]
/external/llvm/lib/Target/X86/

X86ShuffleDecodeConstantPool.cpp
  130  unsigned NumElements = MaskTySize / ElSize;    in DecodeVPERMILPMask() local
  131  assert((NumElements == 2 || NumElements == 4 || NumElements == 8 ||    in DecodeVPERMILPMask()
  132  NumElements == 16) &&    in DecodeVPERMILPMask()
  134  ShuffleMask.reserve(NumElements);    in DecodeVPERMILPMask()
  138  for (unsigned i = 0; i < NumElements; ++i) {    in DecodeVPERMILPMask()
  183  unsigned NumElements = MaskTySize / ElSize;    in DecodeVPERMIL2PMask() local
  184  assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&    in DecodeVPERMIL2PMask()
  186  ShuffleMask.reserve(NumElements);    in DecodeVPERMIL2PMask()
  190  for (unsigned i = 0; i < NumElements; ++i) {    in DecodeVPERMIL2PMask()
  225  Index += Src * NumElements;    in DecodeVPERMIL2PMask()
  [all …]
/external/swiftshader/third_party/subzero/crosstest/

test_vector_ops.cpp
  663  static typename std::enable_if<Vectors<Ty>::NumElements == 4, Ty>::type
  665  const uint8_t NumElements = 4;    in shufflevector() local
  667  V1, V2, Idx0 % (NumElements * 2), Idx1 % (NumElements * 2),    in shufflevector()
  668  Idx2 % (NumElements * 2), Idx3 % (NumElements * 2));    in shufflevector()
  679  static typename std::enable_if<Vectors<Ty>::NumElements == 8, Ty>::type
  681  const uint8_t NumElements = 8;    in shufflevector() local
  683  V1, V2, Idx0 % (NumElements * 2), Idx1 % (NumElements * 2),    in shufflevector()
  684  Idx2 % (NumElements * 2), Idx3 % (NumElements * 2),    in shufflevector()
  685  Idx4 % (NumElements * 2), Idx5 % (NumElements * 2),    in shufflevector()
  686  Idx6 % (NumElements * 2), Idx7 % (NumElements * 2));    in shufflevector()
  [all …]
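The indices are reduced modulo NumElements * 2 because a two-operand shuffle of N-element vectors can select from 2N source elements: indices 0..N-1 address V1 and N..2N-1 address V2. A scalar sketch of that semantics (illustrative only, not the crosstest's shufflevector helper):

    #include <array>
    #include <cstddef>

    // Emulates the two-operand shuffle the test exercises: index k picks
    // V1[k] when k < N and V2[k - N] otherwise, after the same modulo wrap.
    template <typename T, std::size_t N>
    std::array<T, N> Shuffle(const std::array<T, N> &V1, const std::array<T, N> &V2,
                             const std::array<std::size_t, N> &Idx) {
      std::array<T, N> Result{};
      for (std::size_t i = 0; i < N; ++i) {
        const std::size_t k = Idx[i] % (N * 2);
        Result[i] = (k < N) ? V1[k] : V2[k - N];
      }
      return Result;
    }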
test_select_main.cpp
  42   static const size_t NumElements = Vectors<T>::NumElements;    in testSelect() local
  47   for (size_t j = 0; j < NumElements; ++j) {    in testSelect()
  78   static const size_t NumElements = 4;    in testSelect() local
  83   for (size_t j = 0; j < NumElements; ++j) {    in testSelect()
  108  static const size_t NumElements = Vectors<T>::NumElements;    in testSelectI1() local
  113  for (size_t j = 0; j < NumElements; ++j) {    in testSelectI1()
test_icmp_main.cpp
  230  const static size_t NumElementsInType = Vectors<TypeUnsigned>::NumElements;    in testsVecInt()
  265  const static size_t NumElements = Vectors<T>::NumElements;    in incrementI1Vector() local
  266  for (Pos = 0; Pos < NumElements; ++Pos) {    in incrementI1Vector()
  273  return (Pos == NumElements);    in incrementI1Vector()
  290  const static size_t NumElements = Vectors<T>::NumElements;    in testsVecI1() local
  294  if (NumElements <= MAX_NUMBER_OF_ELEMENTS_FOR_EXHAUSTIVE_TESTING) {    in testsVecI1()
  325  for (size_t j = 0; j < NumElements; ++j) {    in testsVecI1()
/external/tensorflow/tensorflow/lite/toco/

tooling_util_test.cc
  110  status = NumElements(std::vector<int>{1024, 1024, 2047}, &count);    in TEST()
  114  status = NumElements(std::vector<int>{1024, 0, 2048}, &count);    in TEST()
  118  status = NumElements(std::vector<int>{1, 2, -3}, &count);    in TEST()
  121  status = NumElements(std::vector<int>{1024, 1024, 2048}, &count);    in TEST()
  129  status = NumElements(std::vector<int32_t>{1024, 1024, 2047}, &count);    in TEST()
  133  status = NumElements(std::vector<int32_t>{1, 2, -3}, &count);    in TEST()
  136  status = NumElements(std::vector<int32_t>{1024, 1024, 2048}, &count);    in TEST()
  144  status = NumElements(std::vector<int64_t>{16777216, 16777216, 32767}, &count);    in TEST()
  148  status = NumElements(std::vector<int64_t>{1, 2, -3}, &count);    in TEST()
  151  status = NumElements(std::vector<int64_t>{16777216, 16777216, 32768}, &count);    in TEST()
  [all …]
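The shape triples above straddle the overflow boundary of the count type: 1024 * 1024 * 2047 still fits a signed 32-bit count while 1024 * 1024 * 2048 does not, and likewise 16777216 * 16777216 * 32767 versus 32768 for a 64-bit count. A sketch of an overflow-checked element count in that spirit (a bool-returning stand-in, not toco's Status-returning NumElements):

    #include <limits>
    #include <vector>

    // Stand-in for an overflow-checked element count: rejects negative
    // dimensions (e.g. {1, 2, -3}) and products that would exceed the count
    // type (e.g. {1024, 1024, 2048} for a 32-bit count).
    template <typename T>
    bool CountElements(const std::vector<T> &dims, T *count) {
      *count = 1;
      for (const T dim : dims) {
        if (dim < 0) return false;
        if (dim != 0 && *count > std::numeric_limits<T>::max() / dim) return false;
        *count *= dim;
      }
      return true;
    }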
/external/tensorflow/tensorflow/core/kernels/

save_restore_v2_ops.cc
  45   const int num_tensors = static_cast<int>(tensor_names.NumElements());    in ValidateInputs()
  47   context, prefix.NumElements() == 1,    in ValidateInputs()
  49   prefix.NumElements(), " instead."));    in ValidateInputs()
  59   tensor_names.NumElements() == shape_and_slices.NumElements(),    in ValidateInputs()
  62   tensor_names.NumElements(), " vs. ",    in ValidateInputs()
  63   shape_and_slices.NumElements()));    in ValidateInputs()
  65   FastBoundsCheck(tensor_names.NumElements() + kFixedInputs,    in ValidateInputs()
  69   context, shape_and_slices.NumElements() == num_tensors,    in ValidateInputs()
  72   context->input(2).NumElements()));    in ValidateInputs()
  103  const int num_tensors = static_cast<int>(tensor_names.NumElements());    in Compute()
  [all …]
unravel_index_op.cc
  62   TensorShape({dims_tensor.NumElements()}),    in Compute()
  73   TensorShape({dims_tensor.NumElements()}),    in Compute()
  84   ctx, ctx->allocate_output(0, TensorShape({dims_tensor.NumElements()}),    in Compute()
  94   TensorShape({dims_tensor.NumElements(),    in Compute()
  95   indices_tensor.NumElements()}),    in Compute()
  100  Eigen::array<Eigen::Index, 2> reshape{{dims_tensor.NumElements(), 1}};    in Compute()
  101  Eigen::array<Eigen::Index, 2> bcast({1, indices_tensor.NumElements()});    in Compute()
  103  {1, indices_tensor.NumElements()}};    in Compute()
  105  {dims_tensor.NumElements(), 1});    in Compute()
sparse_slice_grad_op.cc
  62   ctx, backprop_val_grad->NumElements() == output_indices->dim_size(0),    in Compute()
  65   backprop_val_grad->NumElements(), " and ",    in Compute()
  73   OP_REQUIRES(ctx, num_dims == input_start->NumElements(),    in Compute()
  76   " but got length ", input_start->NumElements()));    in Compute()
  93   for (int64 i = 0; i < input_nnz && j < backprop_val_grad->NumElements();    in Compute()
  111  ctx, backprop_val_grad->NumElements() == j,    in Compute()
  113  "Num elements:", backprop_val_grad->NumElements(),    in Compute()
deserialize_sparse_variant_op.cc
  69   output_shape->NumElements(), &output_indices,    in Compute()
  108  2, {input_dims_to_stack + element_shape->NumElements()},    in Compute()
  116  for (int64 j = 0; j < element_shape->NumElements(); ++j) {    in Compute()
  136  output_shape->NumElements() - input_dims_to_stack ==    in Compute()
  137  element_shape->NumElements(),    in Compute()
  141  i, "] was: ", output_shape->NumElements() - input_dims_to_stack,    in Compute()
  143  "] is: ", element_shape->NumElements()));    in Compute()
  145  for (int j = 0; j < element_shape->NumElements(); ++j) {    in Compute()
  155  const int output_rank = output_shape->NumElements();    in Compute()
  180  const size_t num_index_rows = element_values->NumElements();    in Compute()
  [all …]
list_kernels.h
  118  if (output->NumElements() == 0) {    in Compute()
  128  t.shaped<T, 2>({1, t.NumElements()})));    in Compute()
  130  if (!zeros.NumElements()) {    in Compute()
  142  {1, zeros.NumElements()})));    in Compute()
  145  auto output_flat = output->shaped<T, 2>({1, output->NumElements()});    in Compute()
  400  if ((c->num_inputs() <= 2 || i >= c->input(2).NumElements()) &&    in Compute()
  410  OP_REQUIRES(c, i < c->input(2).NumElements(),    in Compute()
  414  c->input(2).NumElements(), " elements."));    in Compute()
  426  if (output->NumElements() == 0) {    in Compute()
  439  element_tensor.shaped<T, 2>({1, element_tensor.NumElements()})));    in Compute()
  [all …]
mkl_quantized_conv_ops.h
  63  DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());    in MklQuantizationRangeForMultiplication()
  64  DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());    in MklQuantizationRangeForMultiplication()
  65  size_t n_channel = min_b_vector.NumElements();    in MklQuantizationRangeForMultiplication()
sparse_slice_op.cc
  58  const int input_dims = input_shape.NumElements();    in Compute()
  59  OP_REQUIRES(context, input_dims == input_start.NumElements(),    in Compute()
  62  " but got length ", input_start.NumElements()));    in Compute()
  64  OP_REQUIRES(context, input_dims == input_size.NumElements(),    in Compute()
  67  " but got length ", input_size.NumElements()));    in Compute()
edit_distance_op.cc
  67   if (hypothesis_shape.NumElements() != hypothesis_indices.dim_size(1))    in ValidateShapes()
  73   if (truth_shape.NumElements() < 2)    in ValidateShapes()
  77   truth_shape.NumElements());    in ValidateShapes()
  78   if (truth_shape.NumElements() != truth_indices.dim_size(1))    in ValidateShapes()
  84   if (truth_shape.NumElements() != hypothesis_shape.NumElements())    in ValidateShapes()
  125  hypothesis_shape->NumElements(),    in Compute()
  130  truth_shape->NumElements(), &truth_st_shape));    in Compute()
quantized_mul_op.cc
  318  if (x.NumElements() == 1) {    in Compute()
  319  ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(),    in Compute()
  321  } else if (y.NumElements() == 1) {    in Compute()
  322  ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(),    in Compute()
  326  x.NumElements(), z_data);    in Compute()
  335  if (x.NumElements() < y.NumElements()) {    in Compute()
  337  vector_num_elements = x.NumElements();    in Compute()
  340  tensor_num_elements = y.NumElements();    in Compute()
  344  vector_num_elements = y.NumElements();    in Compute()
  347  tensor_num_elements = x.NumElements();    in Compute()
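The element counts drive the broadcast strategy here: a one-element operand goes through ScalarMultiply, and otherwise the operand with fewer elements becomes the "vector" side of the vector-times-tensor path. A small sketch of that operand ordering (names illustrative, not the TensorFlow kernel):

    #include <cstddef>

    // Orders two flat operands the way the excerpt above does: the smaller
    // one is treated as the vector side of a broadcast multiply.
    struct BroadcastOperands {
      const float *vector_data;
      std::size_t vector_num_elements;
      const float *tensor_data;
      std::size_t tensor_num_elements;
    };

    inline BroadcastOperands OrderForBroadcast(const float *x_data, std::size_t x_elements,
                                               const float *y_data, std::size_t y_elements) {
      if (x_elements < y_elements)
        return {x_data, x_elements, y_data, y_elements};
      return {y_data, y_elements, x_data, x_elements};
    }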
sparse_tensor_dense_matmul_op.cc
  60   ctx, a_shape->NumElements() == 2,    in Compute()
  70   OP_REQUIRES(ctx, nnz == a_values->NumElements(),    in Compute()
  75   ctx, a_indices->shape().dim_size(1) == a_shape->NumElements(),    in Compute()
  108  FastBoundsCheck(b->NumElements(), int32max) &&    in Compute()
  110  FastBoundsCheck(a_values->NumElements(), int32max)),    in Compute()
  121  if (out->NumElements() == 0) {    in Compute()
  127  if (a_values->NumElements() == 0 || b->NumElements() == 0) {    in Compute()
example_parsing_ops.cc
  105  keys->reserve(key_t->NumElements());    in GetTensorKeys()
  144  if (names->NumElements() > 0 && names->shape() != serialized->shape()) {    in CheckInputShapes()
  177  if (def_value.NumElements() != 1) {    in CheckInputShapes()
  188  } else if (def_value.NumElements() > 0) {    in CheckInputShapes()
  324  OP_REQUIRES(ctx, def_value.NumElements() == 1,    in Compute()
  334  } else if (def_value.NumElements() > 0) {    in Compute()
  440  context_dense_keys->NumElements() +    in Compute()
  441  feature_list_dense_keys->NumElements());    in Compute()
  443  context_sparse_keys->NumElements() +    in Compute()
  444  feature_list_sparse_keys->NumElements());    in Compute()
  [all …]
/external/mesa3d/src/gallium/drivers/swr/rasterizer/jitter/

jit_pch.hpp
  142  static inline llvm::VectorType* getVectorType(llvm::Type *ElementType, unsigned NumElements)    in getVectorType() argument
  144  return llvm::VectorType::get(ElementType, NumElements, false);    in getVectorType()
  147  static inline llvm::VectorType* getVectorType(llvm::Type *ElementType, unsigned NumElements)    in getVectorType() argument
  149  return llvm::VectorType::get(ElementType, NumElements);    in getVectorType()
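The two identical signatures are presumably the two branches of an LLVM version check the excerpt omits: newer LLVM's llvm::VectorType::get takes an extra scalable-vector flag, older LLVM's does not, and the wrapper hides the difference. An illustrative call site, assuming the getVectorType wrapper above is in scope:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Type.h"

    // Request a fixed <8 x float> regardless of which overload was compiled in.
    static llvm::VectorType *Simd8Float(llvm::LLVMContext &Ctx) {
      return getVectorType(llvm::Type::getFloatTy(Ctx), 8);
    }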
/external/llvm/include/llvm/DebugInfo/CodeView/

StreamReader.h
  57  Error readArray(ArrayRef<T> &Array, uint32_t NumElements) {    in readArray() argument
  59  if (NumElements == 0) {    in readArray()
  64  if (NumElements > UINT32_MAX/sizeof(T))    in readArray()
  67  if (auto EC = readBytes(Bytes, NumElements * sizeof(T)))    in readArray()
  69  Array = ArrayRef<T>(reinterpret_cast<const T *>(Bytes.data()), NumElements);    in readArray()
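The check at line 64 is the interesting part: it rejects NumElements before the 32-bit product NumElements * sizeof(T) at line 67 can wrap around. A standalone sketch of the same guard pattern (illustrative names, not the CodeView StreamReader API):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Reads NumElements fixed-size records from a byte buffer, refusing any
    // count whose byte size would overflow uint32_t or run past the buffer.
    template <typename T>
    bool ReadArray(const std::vector<uint8_t> &Stream, uint32_t Offset,
                   uint32_t NumElements, std::vector<T> &Out) {
      if (NumElements == 0) { Out.clear(); return true; }
      if (NumElements > UINT32_MAX / sizeof(T)) return false;  // product would wrap
      const uint32_t Bytes = NumElements * static_cast<uint32_t>(sizeof(T));
      if (Offset > Stream.size() || Bytes > Stream.size() - Offset) return false;
      Out.resize(NumElements);
      std::memcpy(Out.data(), Stream.data() + Offset, Bytes);
      return true;
    }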
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/

LowLevelType.cpp
  23  auto NumElements = VTy->getNumElements();    in getLLTForType() local
  25  if (NumElements == 1)    in getLLTForType()
  27  return LLT::vector(NumElements, ScalarTy);    in getLLTForType()
/external/tensorflow/tensorflow/lite/kernels/

sparse_to_dense.cc
  46   const int output_dimensions = NumElements(output_shape);    in Resize()
  63   TF_LITE_ENSURE_EQ(context, NumElements(indices), NumElements(values));    in CheckDimensionsMatch()
  65   TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 1);    in CheckDimensionsMatch()
  70   NumElements(output_shape));    in CheckDimensionsMatch()
  73   NumElements(values));    in CheckDimensionsMatch()
  168  TF_LITE_ENSURE_EQ(context, NumElements(default_value), 1);    in Prepare()
/external/tensorflow/tensorflow/core/framework/

tensor_util.cc
  41   if (input.NumElements() > 0) {    in DeepCopy()
  108  CHECK_LE(offset + tensor.NumElements(), result->NumElements());    in Concat()
  109  for (int i = 0; i < tensor.NumElements(); ++i) {    in Concat()
  113  offset += tensor.NumElements();    in Concat()
  170  CHECK_LE(offset + split.NumElements(), tensor.NumElements());    in Split()
  171  for (int i = 0; i < split.NumElements(); ++i) {    in Split()
  175  offset += split.NumElements();    in Split()
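Both Concat and Split above maintain the same invariant: the running offset plus the current piece's NumElements must never exceed the destination's NumElements. A flat-buffer sketch of that bookkeeping, with std::vector standing in for flattened tensors (not the TensorFlow API):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Concatenate flattened pieces into one buffer, checking the same
    // offset + piece-size <= total invariant that the CHECK_LE lines enforce.
    template <typename T>
    std::vector<T> ConcatFlat(const std::vector<std::vector<T>> &pieces,
                              std::size_t total_elements) {
      std::vector<T> result(total_elements);
      std::size_t offset = 0;
      for (const std::vector<T> &piece : pieces) {
        assert(offset + piece.size() <= result.size());
        for (std::size_t i = 0; i < piece.size(); ++i)
          result[offset + i] = piece[i];
        offset += piece.size();
      }
      return result;
    }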
/external/clang/lib/AST/

ExprObjC.cpp
  24   NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) {    in ObjCArrayLiteral()
  47   unsigned NumElements) {    in CreateEmpty() argument
  49   void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumElements));    in CreateEmpty()
  50   return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);    in CreateEmpty()
  59   NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),    in ObjCDictionaryLiteral()
  64   for (unsigned I = 0; I < NumElements; I++) {    in ObjCDictionaryLiteral()
  99   ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,    in CreateEmpty() argument
  102  NumElements, HasPackExpansions ? NumElements : 0));    in CreateEmpty()
  104  ObjCDictionaryLiteral(EmptyShell(), NumElements, HasPackExpansions);    in CreateEmpty()
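CreateEmpty sizes a single allocation to hold the literal node plus NumElements trailing Expr* slots (that is what totalSizeToAlloc<Expr *>(NumElements) computes) and then placement-constructs the node into it. A generic sketch of that object-plus-trailing-array pattern, without Clang's llvm::TrailingObjects machinery:

    #include <cstddef>
    #include <new>

    // One allocation holds the node followed by NumElements pointer slots.
    // alignas keeps the trailing pointers suitably aligned after the header.
    struct alignas(void *) TrailingNode {
      unsigned NumElements;

      void **elements() { return reinterpret_cast<void **>(this + 1); }

      // Caller releases the whole block with ::operator delete.
      static TrailingNode *create(unsigned NumElements) {
        const std::size_t Bytes = sizeof(TrailingNode) + NumElements * sizeof(void *);
        void *Mem = ::operator new(Bytes);
        TrailingNode *N = new (Mem) TrailingNode{NumElements};
        for (unsigned I = 0; I < NumElements; ++I)
          N->elements()[I] = nullptr;
        return N;
      }
    };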