/external/rust/android-crates-io/crates/glam/tests/ |
D | mat2.rs |
    5   ($t:ident, $newmat2:ident, $mat2:ident, $mat3:ident, $newvec2:ident, $vec2:ident) => {
    12  const M0: $mat2 = $mat2::from_cols($newvec2(1.0, 2.0), $newvec2(3.0, 4.0));
    13  const M1: $mat2 = $mat2::from_cols_array(&ARRAY1X4);
    14  const M2: $mat2 = $mat2::from_cols_array_2d(&ARRAY2X2);
    22  assert_eq!($mat2::IDENTITY, $mat2::from_cols_array(&[1., 0., 0., 1.]));
    23  let identity = $mat2::IDENTITY;
    25  assert_eq!($mat2::from_cols_array_2d(&IDENTITY), identity);
    27  assert_eq!(identity, $mat2::default());
    28  assert_eq!(identity, $mat2::from_diagonal($vec2::ONE));
    32  assert_eq!($mat2::ZERO, $mat2::from_cols_array(&[0., 0., 0., 0.]));
    [all …]
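These glam tests pin down the column-major contract of the mat2 constructors: from_cols, from_cols_array, and from_cols_array_2d must all describe the same matrix, and from_diagonal(Vec2::ONE) must equal IDENTITY. A minimal plain-Python sketch of that invariant (illustrative only, not glam's implementation):

def from_cols(x_axis, y_axis):      # two column vectors
    return [list(x_axis), list(y_axis)]

def from_cols_array(a):             # flat [m00, m01, m10, m11], column-major
    return [list(a[0:2]), list(a[2:4])]

def from_diagonal(d):               # diagonal matrix diag(d.x, d.y)
    return [[d[0], 0.0], [0.0, d[1]]]

IDENTITY = from_cols((1.0, 0.0), (0.0, 1.0))
assert from_cols((1.0, 2.0), (3.0, 4.0)) == from_cols_array([1.0, 2.0, 3.0, 4.0])
assert from_diagonal((1.0, 1.0)) == IDENTITY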
|
/external/skia/tests/sksl/shared/ |
D | MatrixEquality.glsl |
    5   uniform mat2 testMatrix2x2;
    9   _0_ok = _0_ok && testMatrix2x2 == mat2(1.0, 2.0, 3.0, 4.0);
    11  _0_ok = _0_ok && testMatrix2x2 != mat2(100.0);
    17  _0_ok = _0_ok && mat2(_2_one, _1_zero, _1_zero, _2_one) == mat2(1.0, 0.0, 0.0, 1.0);
    18  _0_ok = _0_ok && mat2(_2_one, _1_zero, vec2(_2_one)) != mat2(1.0, 0.0, 0.0, 1.0);
    19  _0_ok = _0_ok && mat2(_2_one) == mat2(1.0);
    20  _0_ok = _0_ok && mat2(_2_one) != mat2(0.0);
    21  _0_ok = _0_ok && mat2(-_2_one) == mat2(-1.0);
    22  _0_ok = _0_ok && mat2(_1_zero) == mat2(-0.0);
    23  _0_ok = _0_ok && -mat2(-_2_one) == mat2(1.0);
    [all …]
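The comparisons above lean on two GLSL constructor rules: mat2(s) builds the diagonal matrix diag(s, s), and mat2(a, b, c, d) fills columns in order; == is true only when every component matches. A hedged Python sketch of those rules (nested lists standing in for GLSL matrices, not SkSL itself):

def mat2(*args):
    if len(args) == 1:              # mat2(s) -> diagonal matrix
        s = args[0]
        return [[s, 0.0], [0.0, s]]
    a, b, c, d = args               # column-major fill
    return [[a, b], [c, d]]

one, zero = 1.0, 0.0
assert mat2(one, zero, zero, one) == mat2(1.0, 0.0, 0.0, 1.0)
assert mat2(one) == mat2(1.0)
assert mat2(one) != mat2(0.0)
assert mat2(zero) == mat2(-0.0)     # 0.0 == -0.0 under IEEE 754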
|
D | Matrices.glsl |
    7   mat2 m1 = mat2(1.0, 2.0, 3.0, 4.0);
    8   ok = ok && m1 == mat2(1.0, 2.0, 3.0, 4.0);
    9   mat2 m3 = m1;
    10  ok = ok && m3 == mat2(1.0, 2.0, 3.0, 4.0);
    11  mat2 m4 = mat2(6.0);
    12  ok = ok && m4 == mat2(6.0, 0.0, 0.0, 6.0);
    14  ok = ok && m3 == mat2(6.0, 12.0, 18.0, 24.0);
    15  mat2 m5 = mat2(m1[1].y);
    16  ok = ok && m5 == mat2(4.0, 0.0, 0.0, 4.0);
    18  ok = ok && m1 == mat2(5.0, 2.0, 3.0, 8.0);
    [all …]
|
D | VectorToMatrixCast.glsl |
    10  ok = ok && mat2(testInputs.xy, testInputs.zw) == mat2(-1.25, 0.0, 0.75, 2.25);
    11  ok = ok && mat2(testInputs.xy, testInputs.zw) == mat2(-1.25, 0.0, 0.75, 2.25);
    12  ok = ok && mat2(colorGreen.xy, colorGreen.zw) == mat2(0.0, 1.0, 0.0, 1.0);
    13  ok = ok && mat2(colorGreen.xy, colorGreen.zw) == mat2(0.0, 1.0, 0.0, 1.0);
    14  …ok = ok && mat2(vec4(ivec4(colorGreen)).xy, vec4(ivec4(colorGreen)).zw) == mat2(0.0, 1.0, 0.0, 1.0…
    15  ok = ok && mat2(colorGreen.xy, colorGreen.zw) == mat2(0.0, 1.0, 0.0, 1.0);
    16  ok = ok && mat2(colorGreen.xy, colorGreen.zw) == mat2(0.0, 1.0, 0.0, 1.0);
    17  …ok = ok && mat2(vec4(bvec4(colorGreen)).xy, vec4(bvec4(colorGreen)).zw) == mat2(0.0, 1.0, 0.0, 1.0…
    18  …ok = ok && ((_tempVec0 = colorGreen - colorRed), mat2(_tempVec0.xy, _tempVec0.zw)) == mat2(-1.0, 1…
    19  …ok = ok && ((_tempVec1 = colorGreen + 5.0), mat2(_tempVec1.xy, _tempVec1.zw)) == mat2(5.0, 6.0, 5.…
|
D | MatrixOpEqualsES2.glsl |
    35  mat2 m = mat2(10.0, 20.0, 30.0, 40.0);
    36  m -= mat2(1.0, 2.0, 3.0, 4.0);
    37  ok = ok && m == mat2(9.0, 18.0, 27.0, 36.0);
    40  mat2 m = mat2(2.0, 4.0, 6.0, 8.0);
    41  m /= mat2(2.0, 2.0, 2.0, 4.0);
    42  ok = ok && m == mat2(1.0, 2.0, 3.0, 2.0);
    45  mat2 m = mat2(1.0, 2.0, 7.0, 4.0);
    46  m *= mat2(3.0, 5.0, 3.0, 2.0);
    47  ok = ok && m == mat2(38.0, 26.0, 17.0, 14.0);
    86  mat2 _5_m = mat2(10.0, 20.0, 30.0, 40.0);
    [all …]
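Note the asymmetry these cases exercise: -= and /= on matrices are componentwise in GLSL, but *= is a true matrix product. Checking line 47 by hand in a plain-Python sketch (column-major, so mat2(1,2,7,4) has columns (1,2) and (7,4); not SkSL code):

def mat2(a, b, c, d):               # column-major: columns (a,b) and (c,d)
    return [[a, b], [c, d]]

def matmul2(m, n):                  # result column j = m times column j of n
    out = []
    for cj in n:
        out.append([m[0][0]*cj[0] + m[1][0]*cj[1],
                    m[0][1]*cj[0] + m[1][1]*cj[1]])
    return out

assert matmul2(mat2(1.0, 2.0, 7.0, 4.0), mat2(3.0, 5.0, 3.0, 2.0)) == \
       mat2(38.0, 26.0, 17.0, 14.0)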
|
D | ArrayComparison.glsl |
    18  mat2 m1[3] = mat2[3](mat2(1.0), mat2(2.0), mat2(3.0, 4.0, 5.0, 6.0));
    19  mat2 m2[3] = mat2[3](mat2(1.0), mat2(2.0), mat2(3.0, 4.0, 5.0, 6.0));
    20  mat2 m3[3] = mat2[3](mat2(1.0), mat2(2.0, 3.0, 4.0, 5.0), mat2(6.0));
|
D | MatrixScalarMath.glsl |
    9   bool test_bifffff22(int op, float m11, float m12, float m21, float m22, mat2 expected) {
    11  mat2 m2 = mat2(m11 * one, m12 * one, m21 * one, m22 * one);
    30  mat2 mat = mat2(vec2(ten), vec2(ten));
    31  mat2 div = mat * (1.0 / testInputs.x);
    40  mat2 _0_expected = mat2(f1 + 1.0, f2 + 1.0, f3 + 1.0, f4 + 1.0);
    42  mat2 _2_m2 = mat2(f1 * _1_one, f2 * _1_one, f3 * _1_one, f4 * _1_one);
    46  …mat2(f1 - 1.0, f2 - 1.0, f3 - 1.0, f4 - 1.0))) && test_bifffff22(star, f1, f2, f3, f4, mat2(f1 * 2…
|
/external/pytorch/torch/_inductor/fx_passes/ |
D | decompose_mem_bound_mm.py |
    42  def should_decompose_bmm(mat1, mat2) -> bool: argument
    43  if is_node_meta_valid(mat1) and is_node_meta_valid(mat2):
    45  mat2 = mat2.meta["val"]
    48  if not check_device(mat1, mat2):
    51  if len(mat1.shape) != 3 or len(mat2.shape) != 3:
    58  ) + (mat2.shape[2] < max_other_dimention_decomposition) < 2:
    63  def should_decompose_mm(mat1, mat2) -> bool: argument
    64  if is_node_meta_valid(mat1) and is_node_meta_valid(mat2):
    66  mat2 = mat2.meta["val"]
    70  check_device(mat1, mat2)
    [all …]
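The visible fragments sketch a shape heuristic: only decompose a rank-3 bmm on matching devices when the problem looks memory-bound, and the `< 2` guard at line 58 rejects cases with fewer than two small non-batch dimensions. A hedged reconstruction of that predicate (threshold names and values below are illustrative, not PyTorch's config):

MIN_FIRST_DIM = 10240     # illustrative thresholds, not the real config values
MAX_OTHER_DIM = 32

def should_decompose_bmm_shapes(shape1, shape2) -> bool:
    if len(shape1) != 3 or len(shape2) != 3:
        return False
    if shape1[0] < MIN_FIRST_DIM:          # batch must be large
        return False
    # require at least two small non-batch dimensions (the `< 2` guard, inverted)
    small = (shape1[1] < MAX_OTHER_DIM) + (shape1[2] < MAX_OTHER_DIM) \
          + (shape2[2] < MAX_OTHER_DIM)
    return small >= 2

print(should_decompose_bmm_shapes((20000, 16, 16), (20000, 16, 16)))  # True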
|
D | pad_mm.py |
    85   mat1: Tensor, mat2: Tensor, input: Optional[Tensor] = None
    113  and check_device(mat1, mat2)
    114  and check_dtype(mat1, mat2)
    115  and all(valid_shape_and_stride(t) for t in (mat1, mat2, input))
    139  input: Tensor, mat1: Tensor, mat2: Tensor, beta: float, alpha: float
    141  return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
    145  mat1, mat2, input = fetch_fake_tensors(match, ("mat1", "mat2", "input"))
    146  return should_pad_common(mat1, mat2, input) and should_pad_bench(
    147  match, mat1, mat2, torch.ops.aten.addmm, input=input
    154  mat2: Tensor,
    [all …]
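The replacement pattern at line 141 calls aten.addmm, which computes beta * input + alpha * (mat1 @ mat2). A quick equivalence check against the public torch API (shapes chosen arbitrarily):

import torch

input_ = torch.randn(4, 5)          # broadcastable to the (4, 5) product
mat1, mat2 = torch.randn(4, 3), torch.randn(3, 5)
beta, alpha = 0.5, 2.0

out = torch.addmm(input_, mat1, mat2, beta=beta, alpha=alpha)
ref = beta * input_ + alpha * (mat1 @ mat2)
assert torch.allclose(out, ref, atol=1e-6)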
|
/external/eigen/unsupported/test/ |
D | cxx11_tensor_contraction.cpp |
    23  Tensor<float, 2, DataLayout> mat2(2, 3); in test_evals() local
    27  mat2.setRandom(); in test_evals()
    33  typedef TensorEvaluator<decltype(mat1.contract(mat2, dims3)), DefaultDevice> Evaluator; in test_evals()
    34  Evaluator eval(mat1.contract(mat2, dims3), DefaultDevice()); in test_evals()
    40  VERIFY_IS_APPROX(mat4(0,0), mat1(0,0)*mat2(0,0) + mat1(1,0)*mat2(1,0)); in test_evals()
    41  VERIFY_IS_APPROX(mat4(0,1), mat1(0,0)*mat2(0,1) + mat1(1,0)*mat2(1,1)); in test_evals()
    42  VERIFY_IS_APPROX(mat4(0,2), mat1(0,0)*mat2(0,2) + mat1(1,0)*mat2(1,2)); in test_evals()
    43  VERIFY_IS_APPROX(mat4(1,0), mat1(0,1)*mat2(0,0) + mat1(1,1)*mat2(1,0)); in test_evals()
    44  VERIFY_IS_APPROX(mat4(1,1), mat1(0,1)*mat2(0,1) + mat1(1,1)*mat2(1,1)); in test_evals()
    45  VERIFY_IS_APPROX(mat4(1,2), mat1(0,1)*mat2(0,2) + mat1(1,1)*mat2(1,2)); in test_evals()
    [all …]
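The VERIFY lines spell out a contraction over each tensor's first dimension: mat4(i,j) = sum_k mat1(k,i) * mat2(k,j), i.e. mat1ᵀ @ mat2. A NumPy sketch of the same identity (not Eigen; shapes mirror the test, with mat1 taken as 2×2):

import numpy as np

mat1 = np.random.rand(2, 2)
mat2 = np.random.rand(2, 3)

mat4 = np.einsum('ki,kj->ij', mat1, mat2)   # contract dim 0 with dim 0
assert np.allclose(mat4, mat1.T @ mat2)
assert np.isclose(mat4[0, 0], mat1[0, 0]*mat2[0, 0] + mat1[1, 0]*mat2[1, 0])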
|
D | cxx11_tensor_comparisons.cpp |
    20  Tensor<float, 3> mat2(2,3,7); in test_orderings() local
    27  mat2.setRandom(); in test_orderings()
    29  lt = mat1 < mat2; in test_orderings()
    30  le = mat1 <= mat2; in test_orderings()
    31  gt = mat1 > mat2; in test_orderings()
    32  ge = mat1 >= mat2; in test_orderings()
    37  VERIFY_IS_EQUAL(lt(i,j,k), mat1(i,j,k) < mat2(i,j,k)); in test_orderings()
    38  VERIFY_IS_EQUAL(le(i,j,k), mat1(i,j,k) <= mat2(i,j,k)); in test_orderings()
    39  VERIFY_IS_EQUAL(gt(i,j,k), mat1(i,j,k) > mat2(i,j,k)); in test_orderings()
    40  VERIFY_IS_EQUAL(ge(i,j,k), mat1(i,j,k) >= mat2(i,j,k)); in test_orderings()
    [all …]
|
/external/pytorch/torch/_inductor/kernel/ |
D | mm.py |
    144  def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1): argument
    151  return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
    152  return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)
    159  def tuned_mm(mat1, mat2, *, layout=None): argument
    160  m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
    171  [aten_mm.bind((mat1, mat2), aten_layout)] if use_aten_gemm_kernels() else []
    173  static_shape, is_nonzero = _is_static_problem([mat1, mat2], layout)
    178  input_nodes=(mat1, mat2),
    183  CUTLASS3xGemmTemplate.add_cutlass_gemm_choices(choices, layout, [mat1, mat2])
    186  CKGemmTemplate.add_ck_gemm_choices(choices, layout, [mat1, mat2])
    [all …]
|
D | bmm.py |
    107  def tuned_bmm(mat1, mat2, *, layout=None): argument
    108  if all(x.get_device().type == "cpu" for x in [mat1, mat2]):
    110  if mat1.get_size()[1] == 1 or mat2.get_size()[2] == 1:
    112  mat2 = L.unsqueeze(mat2, 1)
    113  return L.sum_(L.mul(mat1, mat2), axis=2)
    141  if is_valid_to_require_contiguous(mat2):
    143  mat2 = may_require_contiguous(mat2, meta_mat2)
    145  m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
    148  choices = [aten_bmm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
    153  input_nodes=(mat1, mat2),
    [all …]
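Lines 110-113 rewrite a degenerate bmm (inner or output dimension of 1) as unsqueeze, broadcast-multiply, and sum. The snippet elides mat1's reshape, but the identity behind the rewrite is easy to check with the public torch API (a sketch, not the inductor IR ops `L.*` used in the file):

import torch

b, m, k, n = 4, 5, 3, 6
mat1 = torch.randn(b, m, k)
mat2 = torch.randn(b, k, n)

# mat1 -> (b, m, k, 1), mat2 -> (b, 1, k, n); multiply, then reduce over k.
decomposed = (mat1.unsqueeze(3) * mat2.unsqueeze(1)).sum(dim=2)
assert torch.allclose(decomposed, torch.bmm(mat1, mat2), atol=1e-5)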
|
/external/executorch/kernels/portable/cpu/util/ |
D | matmul_ops_util.cpp |
    22  const Tensor& mat2, in check_addmm_args() argument
    27  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat2, 2)); in check_addmm_args()
    30  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mat1, mat2)); in check_addmm_args()
    33  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(mat1, 1, mat2, 0)); in check_addmm_args()
    38  bool check_bmm_args(const Tensor& in, const Tensor& mat2, Tensor& out) { in check_bmm_args() argument
    40  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat2, 3)); in check_bmm_args()
    43  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mat2, out)); in check_bmm_args()
    45  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 0, mat2, 0)); in check_bmm_args()
    46  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 2, mat2, 1)); in check_bmm_args()
    53  const Tensor& mat2, in get_bmm_out_target_size() argument
    [all …]
|
/external/pytorch/aten/src/ATen/native/mkldnn/ |
D | Matmul.cpp |
    14   const Tensor &mat2, in mkldnn_matmul() argument
    23   const Tensor& mat2, in use_mkldnn_bf16_matmul() argument
    30   const Tensor& mat2, in use_mkldnn_fp16_matmul() argument
    69   const Tensor& mat2, in use_mkldnn_bf32_matmul() argument
    76   const Tensor& mat2, in use_mkldnn_matmul() argument
    83   const Tensor &mat2, in mkldnn_matmul_i8i8i32() argument
    229  const Tensor &mat2, in mkldnn_matmul() argument
    233  TORCH_CHECK((mat1.dim() == 2 && mat2.dim() == 2) || // aten::addmm in mkldnn_matmul()
    234  (mat1.dim() == 3 && mat2.dim() == 3) || // aten::bmm, aten::baddbmm in mkldnn_matmul()
    235  (mat1.dim() == 2 && mat2.dim() == 1) || // aten::mv in mkldnn_matmul()
    [all …]
|
/external/executorch/kernels/optimized/cpu/ |
D | op_bmm.cpp |
    13  // Performs a batch matrix-matrix product of matrices stored in input and mat2.
    15  // input and mat2 must be 3-D tensors each containing the same number of
    18  // If input is a (b \times n \times m)(b×n×m) tensor, mat2 is a (b \times m
    32  bool check_bmm_out_args(const Tensor& self, const Tensor& mat2, Tensor& out) { in check_bmm_out_args() argument
    35  self.dim() == mat2.dim(), in check_bmm_out_args()
    36  "self.dim() %zd != mat2.dim() %zd", in check_bmm_out_args()
    38  mat2.dim()); in check_bmm_out_args()
    51  self.size(0) == mat2.size(0), in check_bmm_out_args()
    52  "self.size(0) %zd != mat2.size(0) %zd", in check_bmm_out_args()
    54  mat2.size(0)); in check_bmm_out_args()
    [all …]
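A plain-Python rendering of the shape contract in the comment at lines 13-18: a (b, n, m) batch times a (b, m, p) batch yields (b, n, p), computed batch-by-batch as an ordinary matrix product (a reference sketch, not the optimized kernel):

def bmm(input, mat2):
    b, n, m = len(input), len(input[0]), len(input[0][0])
    assert len(mat2) == b and len(mat2[0]) == m   # batch and inner dims match
    p = len(mat2[0][0])
    return [[[sum(input[t][i][k] * mat2[t][k][j] for k in range(m))
              for j in range(p)]
             for i in range(n)]
            for t in range(b)]

out = bmm([[[1, 2]]], [[[3], [4]]])   # (1,1,2) @ (1,2,1) -> (1,1,1)
assert out == [[[11]]]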
|
/external/skia/tests/sksl/folding/ |
D | MatrixScalarNoOpFolding.glsl |
    2   uniform mat2 testMatrix2x2;
    9   mat2 m;
    10  mat2 mm;
    11  const mat2 z = mat2(0.0);
    19  mm = mat2(0.0);
    20  mm = mat2(0.0);
    55  mat2 m;
    56  mat2 mm;
    57  const mat2 z = mat2(0.0);
    58  const mat2 s = mat2(vec4(1.0).xy, vec4(1.0).zw);
    [all …]
|
/external/skia/tests/sksl/workarounds/ |
D | RewriteMatrixComparisons.glsl |
    7   uniform mediump mat2 testHalf2x2;
    8   uniform highp mat2 testFloat2x2;
    12  mediump mat2 _tempMatrix0;
    13  mediump mat2 _tempMatrix1;
    14  highp mat2 _tempMatrix2;
    15  highp mat2 _tempMatrix3;
    16  mediump mat2 _tempMatrix4;
    17  mediump mat2 _tempMatrix5;
    18  highp mat2 _tempMatrix6;
    19  highp mat2 _tempMatrix7;
    [all …]
|
/external/pytorch/aten/src/ATen/native/sparse/ |
D | SparseBlas.cpp |
    113  * `mat2` - [in] dense Tensor B of size k × n.
    120  const Tensor& mat2, in sparse_sampled_addmm_out_sparse_csr_cpu() argument
    124  at::native::sparse::sparse_sampled_addmm_check_inputs(self, mat1, mat2, beta, alpha, result); in sparse_sampled_addmm_out_sparse_csr_cpu()
    131  // We allow self to be a single matrix when mat1 and mat2 are batched in sparse_sampled_addmm_out_sparse_csr_cpu()
    139  if (mat1.numel() == 0 || mat2.numel() == 0 || result._nnz() == 0) { in sparse_sampled_addmm_out_sparse_csr_cpu()
    144  // transpose mat2 to [b, n, k] from performance perspective. in sparse_sampled_addmm_out_sparse_csr_cpu()
    145  // for gnn classic usage, mat2 is already stored in [b, n, k] physically, in sparse_sampled_addmm_out_sparse_csr_cpu()
    147  auto mat2_t = mat2.transpose(-1, -2).contiguous(); in sparse_sampled_addmm_out_sparse_csr_cpu()
    156  const Tensor& mat2, in sparse_sampled_addmm_sparse_csr_cpu() argument
    160  at::native::sparse_sampled_addmm_out_sparse_csr_cpu(self, mat1, mat2, beta, alpha, result); in sparse_sampled_addmm_sparse_csr_cpu()
    [all …]
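sampled_addmm computes beta * self + alpha * (mat1 @ mat2), but only at positions present in the sparse CSR `self`; everything else stays implicit zero. A hedged check via the public API torch.sparse.sampled_addmm (CSR input required; sampling only the diagonal here):

import torch

self_csr = torch.eye(3).to_sparse_csr()      # sparsity pattern: the diagonal
mat1 = torch.randn(3, 4)
mat2 = torch.randn(4, 3)

out = torch.sparse.sampled_addmm(self_csr, mat1, mat2, beta=0.5, alpha=2.0)
# Reference: same formula, masked to the diagonal pattern of self.
ref = (0.5 * torch.eye(3) + 2.0 * (mat1 @ mat2)) * torch.eye(3)
assert torch.allclose(out.to_dense(), ref, atol=1e-6)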
|
/external/pytorch/aten/src/ATen/native/nested/ |
D | NestedTensorMatmul.cpp |
    20  Tensor bmm_nested(const Tensor& self, const Tensor& mat2) { in bmm_nested() argument
    22  TORCH_CHECK(mat2.dim() == 3, "batch2 must be a 3D tensor"); in bmm_nested()
    25  int64_t ntensors2 = mat2.is_nested() ? get_nested_tensor_impl(mat2)->size(0) : mat2.size(0); in bmm_nested()
    32  …const Tensor& mat2_buffer = mat2.is_nested() ? get_nested_tensor_impl(mat2)->get_unsafe_storage_as… in bmm_nested()
    38  …get_nested_tensor_impl(self)->get_nested_sizes() : get_nested_tensor_impl(mat2)->get_nested_sizes(… in bmm_nested()
    44  const IntArrayRef& mat2_shape = get_size_for_index(mat2, i); in bmm_nested()
    67  …mat2_buffer.as_strided(get_size_for_index(mat2, i), get_stride_for_index(mat2, i), get_offset_for_… in bmm_nested()
    74  static Tensor matmul_with_bmm_nested(const Tensor& self, const Tensor& mat2) { in matmul_with_bmm_nested() argument
    76  // Tensor mat2 = mat2_.contiguous(); in matmul_with_bmm_nested()
    78  // mat2 [N, n_heads, head_dim, *] in matmul_with_bmm_nested()
    [all …]
|
/external/pytorch/aten/src/ATen/native/sparse/cuda/ |
D | SparseBlas.cpp |
    34  * `mat2` - [in] dense Tensor B of size k × n.
    41  const Tensor& mat2, in sparse_sampled_addmm_out_sparse_csr_cuda() argument
    46  self, mat1, mat2, beta, alpha, result); in sparse_sampled_addmm_out_sparse_csr_cuda()
    49  // We allow self to be a single matrix when mat1 and mat2 are batched in sparse_sampled_addmm_out_sparse_csr_cuda()
    58  if (mat1.numel() == 0 || mat2.numel() == 0) { in sparse_sampled_addmm_out_sparse_csr_cuda()
    63  sparse::impl::cuda::sampled_addmm_out_sparse_csr(mat1, mat2, beta, alpha, result); in sparse_sampled_addmm_out_sparse_csr_cuda()
    70  const Tensor& mat2, in sparse_sampled_addmm_sparse_csr_cuda() argument
    74  at::native::sparse_sampled_addmm_out_sparse_csr_cuda(self, mat1, mat2, beta, alpha, result); in sparse_sampled_addmm_sparse_csr_cuda()
    78  // result = beta * self + alpha * (mat1 @ mat2)
    82  const Tensor& mat2, in addmm_out_sparse_compressed_cuda() argument
    [all …]
|
/external/deqp-deps/glslang/Test/baseResults/ |
D | hlsl.matpack-pragma.frag.out |
    18  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( column_maj…
    19  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    27  0:31 mat2: direct index for structure (layout( column_major) temp 4X4 matrix of …
    28  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( column_maj…
    29  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    38  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( column_maj…
    39  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    48  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( row_major)…
    49  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    57  0:32 mat2: direct index for structure (layout( column_major) temp 4X4 matrix of float)
    [all …]
|
/external/angle/third_party/glslang/src/Test/baseResults/ |
D | hlsl.matpack-pragma.frag.out |
    18  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( column_maj…
    19  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    27  0:31 mat2: direct index for structure (layout( column_major) temp 4X4 matrix of …
    28  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( column_maj…
    29  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    38  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( column_maj…
    39  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    48  … 4X4 matrix of float mat1, layout( column_major) temp 4X4 matrix of float mat2, layout( row_major)…
    49  …mat2, layout( column_major) temp 4X4 matrix of float mat3} g_MyBuffer1, layout( row_major std140) …
    57  0:32 mat2: direct index for structure (layout( column_major) temp 4X4 matrix of float)
    [all …]
|
/external/skia/tests/sksl/intrinsics/ |
D | MatrixCompMultES2.glsl |
    5   uniform mat2 testMatrix2x2;
    8   mat2 h22 = mat2(1000000.0, 1000000.0, 1000000.0, 1000000.0);
    9   const mat2 hugeM22 = mat2(1e+30, 1e+30, 1e+30, 1e+30);
    11  h22 = mat2(0.0, 5.0, 10.0, 15.0);
    12  mat2 f22 = matrixCompMult(testMatrix2x2, mat2(1.0));
    14  …return (h22 == mat2(0.0, 5.0, 10.0, 15.0) && f22 == mat2(1.0, 0.0, 0.0, 4.0)) && h33 == mat3(2.0, …
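matrixCompMult is the componentwise (Hadamard) product, not a matrix product; that is why testMatrix2x2 = mat2(1,2,3,4) times mat2(1.0) (the identity) yields mat2(1,0,0,4) rather than the unchanged matrix. A plain-Python check of that expectation (nested lists as column-major matrices):

def matrix_comp_mult(a, b):         # componentwise, column by column
    return [[x * y for x, y in zip(ca, cb)] for ca, cb in zip(a, b)]

test_matrix_2x2 = [[1.0, 2.0], [3.0, 4.0]]   # columns of mat2(1,2,3,4)
identity = [[1.0, 0.0], [0.0, 1.0]]
assert matrix_comp_mult(test_matrix_2x2, identity) == [[1.0, 0.0], [0.0, 4.0]]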
|
/external/vixl/examples/aarch64/ |
D | neon-matrix-multiply.cc |
    64   // mat2 -> x2 in GenerateNEONMatrixMultiply()
    117  float mat1[kLength], mat2[kLength], output[kLength]; in main() local
    144  mat2[0] = 1.0f; in main()
    145  mat2[4] = 11.24f; in main()
    146  mat2[8] = 21.00f; in main()
    147  mat2[12] = 21.31f; in main()
    148  mat2[1] = 2.0f; in main()
    149  mat2[5] = 2.24f; in main()
    150  mat2[9] = 8.56f; in main()
    151  mat2[13] = 52.03f; in main()
    [all …]
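The vixl example stores 4x4 matrices column-major in flat arrays of 16 floats (element (row, col) lives at col * 4 + row), which is why row 0 of mat2 is written at indices 0, 4, 8, 12. A reference multiply in the same layout (plain Python, standing in for the generated NEON code, not vixl itself):

def matmul_colmajor_4x4(a, b):
    out = [0.0] * 16
    for col in range(4):
        for row in range(4):
            # out(row, col) = sum_k a(row, k) * b(k, col)
            out[col * 4 + row] = sum(a[k * 4 + row] * b[col * 4 + k]
                                     for k in range(4))
    return out

identity = [1.0 if i % 5 == 0 else 0.0 for i in range(16)]  # diagonal at 0,5,10,15
m = [float(i) for i in range(16)]
assert matmul_colmajor_4x4(identity, m) == m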
|