Searched full:matmul (Results 1 – 25 of 1142) sorted by relevance


/external/swiftshader/tests/regres/testlists/vk-default/
cooperative-vector.txt
6601 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.10x1.actload.non…
6602 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.10x1.actloadshar…
6603 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.10x1.actmul.nonu…
6604 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.10x1.actmul.nonu…
6605 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.10x1.actmul.nonu…
6606 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.10x1.actmul.nonu…
6607 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.128x128.actmul.n…
6608 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.128x128.actmul.n…
6609 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.128x128.actmul.n…
6610 dEQP-VK.cooperative_vector.matmul.matrixmul.float16_float16_float16_float16.buffer.128x128.actmul.n…
[all …]
/external/pytorch/torch/_inductor/fx_passes/
micro_pipeline_tp.py
307 Replace the matmul with the new node.
319 # An ND-matmul is reshape -> mm -> reshape sequence. We first replace
436 matmul = _Matmul.from_match(match)
437 matmuls.append(matmul)
439 matmul = _ScaledMatmul.from_match(match)
440 matmuls.append(matmul)
460 matmul = _Matmul.from_match(match=[user])
461 matmuls.append(matmul)
463 matmul = _ScaledMatmul.from_match([user])
464 matmuls.append(matmul)
[all …]
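The excerpt's note that an ND-matmul is a reshape -> mm -> reshape sequence can be spelled out directly; a minimal eager-mode sketch, with illustrative shapes and variable names that are not taken from the pass itself:

import torch

a = torch.randn(4, 8, 16)   # ND (here 3D) left operand
b = torch.randn(16, 32)     # 2D right operand

flat = a.reshape(-1, a.shape[-1])              # reshape: (32, 16)
mm = torch.mm(flat, b)                         # plain 2D mm: (32, 32)
nd = mm.reshape(*a.shape[:-1], b.shape[-1])    # reshape back: (4, 8, 32)

assert torch.allclose(nd, torch.matmul(a, b))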
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/end2end/
back2back_fake_quant.pbtxt
31 name: "sequential/quant_dense/MatMul/ReadVariableOp/resource"
58 name: "sequential/quant_dense/MatMul/ReadVariableOp"
60 input: "sequential/quant_dense/MatMul/ReadVariableOp/resource"
69 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp/resource"
90 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp"
92 input: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp/resource"
101 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1/resource"
122 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1"
124 input: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1/resource"
133 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars"
[all …]
/external/tensorflow/tensorflow/compiler/mlir/tfrt/python_tests/
tf_matmul_test.py
15 """Tests for tf.MatMul JIT compilation."""
23 def matmul(): function
25 func.func @matmul(%arg0: tensor<?x?xf32>,
27 %0 = "tf.MatMul"(%arg0, %arg1) {
43 np.testing.assert_allclose(res, np.matmul(lhs, rhs), rtol=1e-05)
48 # Matmul: [1, k] x [k, 1]
50 compiled = jitrt.compile(matmul(), "matmul")
55 # Matmul: [1, k] x [k, n]
57 compiled = jitrt.compile(matmul(), "matmul")
63 # Matmul: [n, k] x [k, 1]
[all …]
/external/tensorflow/tensorflow/python/ops/linalg/sparse/
sparse_csr_matrix_grad.py
228 def matmul(x, y, **kwargs): # pylint: disable=invalid-name function
244 grad_a = matmul(grad, b, transpose_b=not t_b)
246 grad_a = matmul(b, grad, transpose_a=t_b, transpose_b=True)
250 grad_a = matmul(grad, b, adjoint_b=not adj_b)
252 grad_a = matmul(b, grad, adjoint_a=adj_b, adjoint_b=True)
260 grad_a = matmul(b, grad, transpose_a=True, adjoint_b=True)
263 grad_a = matmul(b, grad, transpose_a=True, transpose_b=True)
272 grad_a = matmul(grad, b, transpose_a=True, transpose_b=not t_b)
274 grad_a = matmul(b, grad, transpose_a=t_b)
279 grad_a = matmul(grad, b, transpose_a=True, adjoint_b=not adj_b)
[all …]
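The transpose/adjoint branches above all derive from the standard identities for C = A @ B: grad_A = grad_C @ Bᵀ and grad_B = Aᵀ @ grad_C. A dense NumPy sketch of just those identities (illustrative only, not the CSR-specific code):

import numpy as np

A = np.random.randn(3, 4)
B = np.random.randn(4, 5)
grad_C = np.random.randn(3, 5)   # upstream gradient dL/dC

grad_A = grad_C @ B.T            # dL/dA, shape (3, 4)
grad_B = A.T @ grad_C            # dL/dB, shape (4, 5)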
/external/tensorflow/tensorflow/compiler/jit/tests/
opens2s_gnmt_mixed_precision.golden_summary
119 MatMul 1
130 MatMul 2
194 MatMul 10
227 MatMul 20
248 MatMul 1
264 MatMul 2
289 MatMul 1
305 MatMul 2
321 MatMul 1
335 MatMul 1
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
matmul_op_test.py
15 """Tests for tensorflow.ops.math_ops.matmul."""
33 # TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
39 """Simple test for tf.matmul where Tout is different from T."""
42 # TODO(shivaniagrawal): uint8 is not supported for mixed matmul type in XLA.
51 # TODO(shivaniagrawal): uint8 is not supported for mixed matmul type in XLA.
62 """Simple test for matvec, which is sugar on top of matmul."""
75 np.matmul(full.T, empty), math_ops.matmul(full, empty, adjoint_a=True))
77 np.matmul(empty.T, full), math_ops.matmul(empty, full, adjoint_a=True))
103 @test_util.run_without_tensor_float_32("Tests matmul")
111 print("Built without fp16 matmul support for Cuda, running test on CPU.")
[all …]
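The docstring's remark that matvec is "sugar on top of matmul" corresponds to the usual expand-matmul-squeeze formulation; a small sketch under that assumption (not a quote of the TF implementation):

import tensorflow as tf

a = tf.random.normal([3, 4])
v = tf.random.normal([4])

via_matvec = tf.linalg.matvec(a, v)                                    # shape [3]
via_matmul = tf.squeeze(tf.matmul(a, tf.expand_dims(v, -1)), axis=-1)  # same values

tf.debugging.assert_near(via_matvec, via_matmul)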
/external/tensorflow/tensorflow/compiler/aot/tests/
tfcompile_test.cc
266 foo::bar::MatMulComp matmul; in TEST() local
267 matmul.set_thread_pool(&device); in TEST()
268 EXPECT_EQ(matmul.arg0_data(), matmul.arg_data(0)); in TEST()
269 EXPECT_EQ(matmul.arg1_data(), matmul.arg_data(1)); in TEST()
273 matmul.arg0(0, 0) = 1; in TEST()
274 matmul.arg0(0, 1) = 2; in TEST()
275 matmul.arg0(0, 2) = 3; in TEST()
276 matmul.arg0(1, 0) = 4; in TEST()
277 matmul.arg0(1, 1) = 5; in TEST()
278 matmul.arg0(1, 2) = 6; in TEST()
[all …]
/external/executorch/backends/qualcomm/_passes/
convert_bmm_to_matmul.py
18 Replace bmm with matmul, because bmm is equal to matmul in QNN.
26 matmul = exir_ops.edge.aten.matmul.default variable in ConvertBmmToMatmul
49 graph, [operator.matmul, torch.matmul, torch.bmm]
63 # replace bmm with matmul, because bmm is equal to matmul in qnn.
65 "call_function", self.matmul, (lhs, rhs)
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
device_copy.mlir
5 // CHECK: tf.MatMul
6 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "/device:CPU:0", transpose_a = false, transpose_b =…
14 // CHECK: tf.MatMul
15 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "", transpose_a = false, transpose_b = false} : (te…
23 // CHECK: tf.MatMul
24 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "/device:GPU:0", transpose_a = false, transpose_b =…
32 // CHECK: tf.MatMul
33 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "/device:GPU:0", transpose_a = false, transpose_b =…
41 // CHECK: tf.MatMul
42 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "", transpose_a = false, transpose_b = false} : (te…
[all …]
/external/pytorch/test/distributed/
test_compute_comm_reordering.py
111 b = torch.matmul(a, a)
112 return torch.matmul(ar, b)
120 # Verify that the wait_tensor is sinked below the 1st matmul but
121 # above the 2nd matmul.
149 b = torch.matmul(a, a)
151 d = torch.matmul(c, c)
153 return torch.matmul(d, e)
162 # Verify that the all_reduce_ has been raised above the 2nd matmul
163 # but below the 1st matmul. Note that the all_reduce_ directly
164 # writes to the output buffer of the 1st matmul, which is an input
[all …]
/external/tensorflow/tensorflow/cc/framework/
gradients_test.cc
34 using ops::MatMul;
64 // dy| dx| (MatMul Gradient Graph)
75 // | z| | (MatMul Forward Graph)
93 auto z = MatMul(scope, x, y); in TEST_F()
100 auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true)); in TEST_F()
101 auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true)); in TEST_F()
119 auto z = MatMul(scope, x, y); in TEST_F()
128 auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true)); in TEST_F()
129 auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true)); in TEST_F()
145 auto x = MatMul(scope, u, v); in TEST_F()
[all …]
/external/pytorch/test/jit/
test_graph_rewrite_passes.py
18 res = torch.matmul(x, self.weight.t())
30 if node.kind() == "aten::matmul":
37 check_not = ["aten::matmul", "aten::addmm", "aten::add_", "aten::t("]
45 class Matmul(torch.nn.Module): class
51 return torch.matmul(x, self.weight)
55 model = torch.jit.trace(Matmul(w), [x])
57 # check 3d matmul is not fused
58 FileCheck().check("aten::matmul").run(model.graph)
/external/pytorch/torch/csrc/jit/passes/
frozen_linear_transpose.cpp
54 Node* matmul = nullptr; in replace_linear_with_matmul() local
64 matmul = graph_->create(aten::matmul, {node->inputs()[0], weight_t}); in replace_linear_with_matmul()
65 matmul->insertAfter(node); in replace_linear_with_matmul()
69 WithInsertPoint insert_guard(matmul); in replace_linear_with_matmul()
72 node->replaceAllUsesWith(matmul); in replace_linear_with_matmul()
76 graph_->create(aten::add, {matmul->output(), bias, bias_scale}); in replace_linear_with_matmul()
77 bias_result->insertAfter(matmul); in replace_linear_with_matmul()
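The arithmetic behind this rewrite is linear(x, W, b) == x @ Wᵀ + b; a minimal Python sketch of that identity (illustrative shapes, not the C++ pass itself):

import torch
import torch.nn.functional as F

x = torch.randn(5, 16)
W = torch.randn(32, 16)   # nn.Linear keeps weight as [out_features, in_features]
b = torch.randn(32)

weight_t = W.t()
rewritten = torch.matmul(x, weight_t) + b

assert torch.allclose(F.linear(x, W, b), rewritten, atol=1e-6)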
/external/tensorflow/tensorflow/python/profiler/internal/
run_metadata_test.py
64 y = math_ops.matmul(x, w)
89 # Grappler might fuse MatMul with BiasAdd in remapper optimizer.
129 self.assertEqual(tfprof_node.children[0].name, 'MatMul')
132 ret = _extract_node(run_meta, 'MatMul')
145 mm = _extract_node(run_meta, 'MatMul')['gpu:0'][0]
160 # random normal must allocated first since matmul depends on it.
162 # deallocates the memory after matmul started.
170 self.assertEqual(tfprof_node.children[0].name, 'MatMul')
173 ret = _extract_node(run_meta, 'MatMul')
176 ret = _extract_node(run_meta, 'MatMul:MatMul')
[all …]
/external/tensorflow/tensorflow/python/ops/
linalg_grad.py
51 return -math_ops.matmul( # pylint: disable=invalid-unary-operand-type
53 math_ops.matmul(grad, ainv, adjoint_a=op_adjoint,
475 middle = math_ops.matmul(l, grad, adjoint_a=True)
480 grad_a = math_ops.matmul(
481 math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
508 """Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
517 qdq = math_ops.matmul(q, dq, adjoint_a=True)
519 rdr = math_ops.matmul(r, dr, adjoint_b=True)
523 grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
524 grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
[all …]
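The first lines above implement the standard matrix-inverse gradient, dL/dA = -(A⁻¹)ᵀ · (dL/dY) · (A⁻¹)ᵀ for Y = A⁻¹. A hedged numeric check against TensorFlow's autodiff (well-conditioned A, float64, illustrative only):

import tensorflow as tf

A = tf.random.normal([4, 4], dtype=tf.float64) + 4.0 * tf.eye(4, dtype=tf.float64)
upstream = tf.random.normal([4, 4], dtype=tf.float64)   # dL/dY

with tf.GradientTape() as tape:
    tape.watch(A)
    Y = tf.linalg.inv(A)
grad_auto = tape.gradient(Y, A, output_gradients=upstream)

ainv = tf.linalg.inv(A)
grad_manual = -tf.matmul(tf.transpose(ainv), tf.matmul(upstream, tf.transpose(ainv)))

tf.debugging.assert_near(grad_auto, grad_manual)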
/external/tensorflow/tensorflow/python/compiler/tensorrt/test/
batch_matmul_test.py
57 x1 = math_ops.matmul(inp, inp1, name="matmul")
68 return {"TRTEngineOp_000": ["matmul", "relu"]}
78 x1 = math_ops.matmul(inp, b, name="matmul")
87 return {"TRTEngineOp_000": ["matmul", "kernel"]}
96 x1 = math_ops.matmul(inp, b, name="matmul")
105 return {"TRTEngineOp_000": ["matmul", "kernel"]}
/external/pytorch/aten/src/ATen/test/
native_test.cpp
135 // Throw StartsWith("both arguments to matmul need to be at least 1D") in TestMatmul()
137 ASSERT_ANY_THROW(scalar.matmul(d2)); in TestMatmul()
138 // Throw StartsWith("both arguments to matmul need to be at least 1D") in TestMatmul()
140 ASSERT_ANY_THROW(d2.matmul(scalar)); in TestMatmul()
143 ASSERT_ALLCLOSE(d1.matmul(d1), d1.dot(d1)); in TestMatmul()
144 ASSERT_ALLCLOSE(d2.matmul(d1), d2.mv(d1)); in TestMatmul()
146 ASSERT_ALLCLOSE(d1o.matmul(d2), d1o.unsqueeze(0).mm(d2).squeeze(0)); in TestMatmul()
150 ASSERT_ALLCLOSE(d2.matmul(d2o), d2.mm(d2o)); in TestMatmul()
155 d3.matmul(d1), d3.bmm(d1.view({1, 3, 1}).expand({5, 3, 1})).view({5, 2})); in TestMatmul()
156 ASSERT_ALLCLOSE(d1o.matmul(d3), d1o.expand({5, 1, 2}).bmm(d3).view({5, 3})); in TestMatmul()
[all …]
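The test above exercises matmul's dimension-dependent dispatch (dot for 1D·1D, mv for 2D·1D, mm for 2D·2D, and unsqueeze/mm/squeeze for 1D·2D); the same equivalences restated in Python with illustrative shapes:

import torch

v = torch.randn(3)
w = torch.randn(3)
m = torch.randn(2, 3)
n = torch.randn(3, 4)

assert torch.allclose(torch.matmul(v, w), torch.dot(v, w))                  # 1D x 1D
assert torch.allclose(torch.matmul(m, v), torch.mv(m, v))                   # 2D x 1D
assert torch.allclose(torch.matmul(v, n), v.unsqueeze(0).mm(n).squeeze(0))  # 1D x 2D
assert torch.allclose(torch.matmul(m, n), torch.mm(m, n))                   # 2D x 2D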
/external/tensorflow/tensorflow/python/compiler/xla/
jit.py
49 c = tf.matmul(a, b) # compiled
51 d = tf.matmul(a, c) # not compiled
53 compile_ops=lambda node_def: 'matmul' in node_def.op.lower()):
54 e = tf.matmul(a, b) + d # matmul is compiled, the addition is not.
64 f = tf.matmul(a, b)
79 x = tf.matmul(a, b)
81 y = tf.matmul(c, d)
92 x = tf.matmul(a, b)
93 y = tf.matmul(c, d)
/external/pytorch/aten/src/ATen/native/quantized/cpu/
qmatmul.cpp
18 "MatMul operands should use QInt8 or QUInt8 data types."); in check_inputs()
21 "MatMul operands should have same data type."); in check_inputs()
24 "Only per-tensor quantization is supported in Matmul."); in check_inputs()
27 "Both inputs to Matmul must have the same quantization scheme."); in check_inputs()
44 "MatMul operands should have the same dimensionality. (", num_dims, in qmatmul()
48 "Quantized Matmul currently only supports operands which are at least 2-dimensional. (", in qmatmul()
58 "For Quantized Matmul, the size of tensor a (", k, in qmatmul()
70 "For Quantized Matmul, the size of tensor a (", dim, in qmatmul()
174 Tensor rc = at::matmul(ra, rb); in qmatmul()
182 m.impl(TORCH_SELECTIVE_NAME("quantized::matmul"), TORCH_FN(qmatmul)); in TORCH_LIBRARY_IMPL()
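The excerpt's "Tensor rc = at::matmul(ra, rb)" suggests a reference path of dequantize -> ordinary matmul -> requantize; a hedged Python sketch of that path, with arbitrary illustrative scales and zero points (not the quantized::matmul kernel itself):

import torch

a = torch.quantize_per_tensor(torch.randn(2, 3, 4), scale=0.1, zero_point=0, dtype=torch.qint8)
b = torch.quantize_per_tensor(torch.randn(2, 4, 5), scale=0.1, zero_point=0, dtype=torch.qint8)

rc = torch.matmul(a.dequantize(), b.dequantize())                   # float reference result
out = torch.quantize_per_tensor(rc, scale=0.05, zero_point=0, dtype=torch.qint8)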
/external/pytorch/torch/fx/experimental/
merge_matmul.py
18 splits the output from a merged matmul into the individual results for each
22 result: The merged matmul result tensor.
23 inputs: The list of inputs that were merged into one for the matmul.
26 List of matmul results for each input tensor.
110 # the matmul of which they are the LHS/RHS.
112 if node.op != "call_function" or node.target is not torch.matmul:
138 # Merge the matmul.
149 merge_mm = gm.graph.call_function(torch.matmul, (merge_mm_cat, rhs,), {})
151 # Split the result of the merged matmul using the shapes of the LHS operands
161 … all uses of the original, unmerged matmuls with the equivalent split chunk from the merged matmul.
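The docstring fragments above describe merging several matmuls that share one right-hand side: concatenate the LHS operands, run a single matmul, then split the result back per operand. A minimal eager sketch of that transformation (illustrative shapes only):

import torch

lhs_list = [torch.randn(2, 8), torch.randn(5, 8), torch.randn(3, 8)]
rhs = torch.randn(8, 16)

merged = torch.matmul(torch.cat(lhs_list, dim=0), rhs)              # one merged matmul
chunks = torch.split(merged, [t.shape[0] for t in lhs_list], dim=0) # split back per input

for lhs, chunk in zip(lhs_list, chunks):
    assert torch.allclose(torch.matmul(lhs, rhs), chunk, atol=1e-6)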
/external/pytorch/aten/src/ATen/native/mkldnn/xpu/detail/
Matmul.cpp
14 sycl::event matmul( in matmul() function
25 "oneDNN matmul only works with 2D or 3D, got ", in matmul()
30 TORCH_CHECK(result.defined(), "oneDNN matmul result should be defined"); in matmul()
65 "matmul supports [n] or [1] when bias dim is 1 ..."); in matmul()
79 "matmul supports [m, n] or [1, n] or [m, 1] or [1, 1] when bias dim is 2 ..."); in matmul()
85 "matmul bias must be expandable to:", in matmul()
92 b.numel() == 1, "matmul supports 1 numel when bias dim is [] ..."); in matmul()
99 TORCH_CHECK(0, "unsupported bias dim in matmul ..."); in matmul()
105 // xpu matmul support both ab/ba shape for m2 tensor, we don't check any more in matmul()
167 dnnl::matmul matmul_p; in matmul()
[all …]
/external/pytorch/functorch/op_analysis/
annotated_ops
25 addmv, composite matmul
47 baddbmm, composite matmul
50 bilinear, composite matmul
61 bmm, composite matmul
98 einsum, composite matmul
150 linear, composite matmul
173 matmul, composite matmul
184 mm, composite matmul
188 mv, composite matmul
276 tensordot, composite matmul
[all …]
/external/pytorch/test/inductor/
test_fused_attention.py
115 torch.matmul(query, key.transpose(-2, -1))
118 .matmul(value)
143 torch.matmul(query, key.transpose(-2, -1))
146 .matmul(value)
253 torch.matmul(query, key.transpose(-2, -1))
257 return attn_weights.matmul(value), attn_weights
274 torch.matmul(query, key.transpose(-2, -1))
277 .matmul(value)
289 torch.matmul(query, key.transpose(-2, -1)).div(3.0).softmax(dim=-1),
293 ).matmul(value)
[all …]
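These tests all build the same unfused attention chain (matmul, scale, softmax, matmul) that the fusion pass matches; spelled out once with illustrative shapes and an arbitrary divisor:

import torch

query = torch.randn(2, 4, 16, 32)
key = torch.randn(2, 4, 16, 32)
value = torch.randn(2, 4, 16, 32)

attn_weights = torch.matmul(query, key.transpose(-2, -1)).div(3.0).softmax(dim=-1)
out = attn_weights.matmul(value)   # shape (2, 4, 16, 32)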
/external/tensorflow/tensorflow/c/eager/
c_api_remote_test_util.cc
48 " name: 'matmul'" in MatMulFunction()
49 " op: 'MatMul'" in MatMulFunction()
63 " value: 'matmul:product'" in MatMulFunction()
137 TFE_Op* matmul = nullptr; in TestRemoteExecuteSilentCopies() local
145 matmul = TFE_NewOp(ctx, "MatMulFunction", status); in TestRemoteExecuteSilentCopies()
147 TFE_OpAddInput(matmul, h0_task0, status); in TestRemoteExecuteSilentCopies()
149 TFE_OpAddInput(matmul, has_packed_input ? packed_handle : h1_task2, status); in TestRemoteExecuteSilentCopies()
153 matmul = MatMulOp(ctx, h0_task0, h1_task2); in TestRemoteExecuteSilentCopies()
156 TFE_OpSetDevice(matmul, task1_name, status); in TestRemoteExecuteSilentCopies()
162 TFE_OpSetDevice(matmul, cpu_device_name.c_str(), status); in TestRemoteExecuteSilentCopies()
[all …]
