
Searched full:addmm (Results 1 – 25 of 381) sorted by relevance


/external/pytorch/torch/ao/quantization/backend_config/
_qnnpack_pt2e.py
36 # addmm
43 # input - addmm
46 # input - observer - addmm
49 # input - observer - addmm
52 # addmm, bias, act, weight = node_pattern
53 # return addmm
56 …# BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.at…
62 BackendPatternConfig(torch.ops.aten.addmm.default)
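
The uncommented line above registers the bare addmm overload with the backend config. A minimal sketch of what a fuller config for it could look like; the ObservationType and DTypeConfig values below are illustrative assumptions, not taken from this file:

    import torch
    from torch.ao.quantization.backend_config import (
        BackendPatternConfig,
        DTypeConfig,
        ObservationType,
    )

    # Illustrative qnnpack-style dtype pairing; the real configs may differ.
    addmm_config = (
        BackendPatternConfig(torch.ops.aten.addmm.default)
        .set_observation_type(
            ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
        )
        .set_dtype_configs(
            [DTypeConfig(input_dtype=torch.quint8, output_dtype=torch.quint8)]
        )
    )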
/external/executorch/backends/xnnpack/partition/config/
gemm_configs.py
48 GEMM-like ops like Convolution, Addmm, Linear, mostly behave in the same way, in which we
323 We will handle the legacy form of addmm partitioning which will include
327 target_name = "addmm.default"
363 # if addmm belongs to linear src partition, then partition the
375 from the linear src partition, and plug those in as the addmm node's args. We also
376 take the users of the src partitions output node as the addmm node's users. Finally
378 getting the deps, we return the addmm nodes users and args back.
393 # map addmm's args to the source partition's inputs
411 # map addmm's args to the source partition linear's inputs and users
416 # Reset addmm node back to old args and users
/external/executorch/backends/qualcomm/_passes/
convert_to_linear.py
29 Handle missing quantization tag for addmm op after decomposing
37 addmm = exir_ops.edge.aten.addmm.default variable in ConvertToLinear
42 {view_copy: 2, permute_copy: 1, addmm: 1},
43 {permute_copy: 1, addmm: 1},
167 addmm_node = [n for n in partitioned_nodes if n.target == self.addmm][0]
168 # weight -> permute -> input of addmm
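
The node-count patterns above ({view_copy: 2, permute_copy: 1, addmm: 1} and {permute_copy: 1, addmm: 1}) come from how nn.Linear decomposes during export: the weight is permuted, addmm does the GEMM, and the extra view nodes appear when the input is not 2D. A rough sketch of reproducing the decomposition; the *_copy op variants only appear after lowering to ExecuTorch's edge dialect, so this is illustrative:

    import torch

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(8, 16)

        def forward(self, x):
            return self.linear(x)

    # A 3D input forces the extra flatten/unflatten views around addmm.
    ep = torch.export.export(M(), (torch.randn(2, 3, 8),)).run_decompositions()
    print(ep.graph_module.graph)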
/external/executorch/backends/apple/mps/runtime/operations/
LinearAlgebra.mm
54 … name:@"addmm/matmul"];
63 name:@"addmm/alpha*matmul"];
73 name:@"addmm/beta*bias"];
78 … name:@"addmm/beta*bias*alpha*matmul"];
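
The four MPSGraph node names above spell out addmm's definition, out = beta * bias + alpha * (mat1 @ mat2). A quick CPU check of that identity:

    import torch

    bias, mat1, mat2 = torch.randn(5, 7), torch.randn(5, 3), torch.randn(3, 7)
    alpha, beta = 2.0, 0.5
    assert torch.allclose(
        torch.addmm(bias, mat1, mat2, alpha=alpha, beta=beta),
        beta * bias + alpha * (mat1 @ mat2),
    )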
/external/pytorch/tools/autograd/
deprecated.yaml
49 - name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
50 aten: addmm(self, mat1, mat2, beta, alpha)
55 - name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2, *, Tensor(a!) out) …
58 - name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
59 aten: addmm(self, mat1, mat2, beta, 1)
64 - name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
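
These entries forward the long-deprecated positional forms onto the modern ATen signature; note the third entry pins alpha to 1. In today's API the scaling factors are keyword arguments, e.g. the equivalent of the deprecated addmm(beta, self, mat1, mat2):

    import torch

    b, m1, m2 = torch.randn(3, 5), torch.randn(3, 4), torch.randn(4, 5)
    # alpha defaults to 1, matching the "addmm(self, mat1, mat2, beta, 1)" mapping.
    assert torch.allclose(
        torch.addmm(b, m1, m2, beta=0.5),
        0.5 * b + m1 @ m2,
    )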
/external/pytorch/test/distributed/_tensor/
test_matrix_ops.py
40 dist_res = torch.addmm(input, mat1, mat2)
41 local_res = torch.addmm(input_tensor, tensor_to_shard, tensor_to_replicate)
57 dist_res = torch.addmm(inp, mat1, mat2)
58 local_res = torch.addmm(input_tensor, tensor_to_shard, tensor_to_replicate)
75 local_res = torch.addmm(input_tensor, tensor_to_shard1, tensor_to_shard0)
76 dist_res = torch.addmm(input, mat1, mat2)
78 # test if addmm output is a partial
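
A minimal sketch of what these DTensor tests set up, assuming a 2-rank process group is already initialized; the shapes and placements below are illustrative, not copied from the test:

    import torch
    from torch.distributed._tensor import Replicate, Shard, distribute_tensor
    from torch.distributed.device_mesh import init_device_mesh

    mesh = init_device_mesh("cuda", (2,))
    inp = distribute_tensor(torch.randn(4, 8), mesh, [Replicate()])
    mat1 = distribute_tensor(torch.randn(4, 6), mesh, [Shard(0)])   # row-sharded
    mat2 = distribute_tensor(torch.randn(6, 8), mesh, [Replicate()])
    dist_res = torch.addmm(inp, mat1, mat2)  # result stays sharded on dim 0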
/external/pytorch/test/inductor/
test_pad_mm.py
177 def addmm(x, a, b): function
178 return torch.addmm(x, a, b)
183 self.assertEqual(torch.compile(addmm)(x, a, b), addmm(x, a, b))
284 return torch.addmm(a, b, c)
313 return torch.addmm(a, b, c)
348 return torch.ops.aten.addmm(input, x, y)
358 out_eager = torch.ops.aten.addmm(*inps)
368 out_eager = torch.ops.aten.addmm(*inps)
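
The pad-mm tests all follow the same shape: compile a thin addmm wrapper and compare against eager. A standalone version of that check:

    import torch

    def addmm(x, a, b):
        return torch.addmm(x, a, b)

    x, a, b = torch.randn(8, 8), torch.randn(8, 4), torch.randn(4, 8)
    torch.testing.assert_close(torch.compile(addmm)(x, a, b), addmm(x, a, b))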
test_pattern_matcher.py
720 torch.addmm(a, b, c),
721 torch.addmm(b, c, a),
722 torch.addmm(c, a, b),
1123 return torch.ops.aten.addmm(inp, a, b)
1126 FileCheck().check("extern_kernels.addmm(").run(code[0])
1130 return torch.nn.functional.gelu(torch.ops.aten.addmm(inp, a, b))
1133 FileCheck().check_not("extern_kernels.addmm(").run(code[0])
1138 torch.ops.aten.addmm(inp, a, b).unsqueeze(0)
1143 FileCheck().check_not("extern_kernels.addmm(").run(code[0])
1201 return torch.ops.aten.addmm(inp, a, b)
[all …]
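
The FileCheck assertions above inspect Inductor's generated code for the extern addmm kernel; whether "extern_kernels.addmm(" appears depends on device, dtype, and fusion decisions, so treat this harness sketch as illustrative:

    import torch
    from torch._inductor.utils import run_and_get_code
    from torch.testing import FileCheck

    def f(inp, a, b):
        return torch.ops.aten.addmm(inp, a, b)

    inp, a, b = (torch.randn(4, 4) for _ in range(3))
    _, code = run_and_get_code(torch.compile(f), inp, a, b)
    FileCheck().check("extern_kernels.addmm(").run(code[0])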
test_max_autotune.py
299 Make sure autotuning addmm in sub processes work without crashes.
304 def addmm(x, a, b): function
305 return torch.addmm(x, a, b)
311 Y_compiled = torch.compile(addmm, dynamic=dynamic)(x, a, b)
312 Y = addmm(x, a, b)
318 Make sure autotuning addmm with zero-size input works without crashes.
321 def addmm(x, a, b): function
322 return torch.addmm(x, a, b)
328 torch.compile(addmm, dynamic=dynamic)(x, a, b)
422 torch.addmm(a, b, c),
[all …]
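
A trimmed-down version of the autotune test above; mode="max-autotune" needs a GPU with Triton available, so this is a sketch rather than a portable test:

    import torch

    def addmm(x, a, b):
        return torch.addmm(x, a, b)

    x = torch.randn(128, 128, device="cuda")
    a = torch.randn(128, 64, device="cuda")
    b = torch.randn(64, 128, device="cuda")
    compiled = torch.compile(addmm, mode="max-autotune")
    torch.testing.assert_close(compiled(x, a, b), addmm(x, a, b), atol=1e-2, rtol=1e-2)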
test_cutlass_backend.py
454 Make sure autotuning addmm in sub processes work without crashes.
462 def addmm(x, a, b, alpha, beta): function
463 return torch.addmm(x, a, b, alpha=alpha, beta=beta)
471 y_expected = addmm(x, a, b, alpha, beta)
473 compiled_fn = torch.compile(addmm, dynamic=dynamic)
655 return torch.addmm(x, a, b, alpha=beta, beta=alpha)
685 assert op_name == "addmm"
702 def addmm(x, a, b, alpha, beta): function
703 return torch.addmm(x, a, b, alpha=alpha, beta=beta)
730 torch.compile(addmm, dynamic=False)(x, a, b, 1.0, 1.0)
[all …]
test_triton_heuristics.py
57 addmm = torch.ops.aten.addmm.default(primals_2, view_1, permute_1)
59 return addmm
test_ck_backend.py
248 def addmm(x, a, b, alpha, beta): function
249 return torch.addmm(x, a, b, alpha=alpha, beta=beta)
251 Y_compiled = addmm(x, a, b, alpha, beta)
252 Y_eager = torch.addmm(x, a, b, alpha=alpha, beta=beta)
/external/pytorch/torch/_inductor/kernel/
mm.py
128 torch.addmm, "at::addmm_out", op_overload=aten.addmm.default
146 Giving torch.addmm a 1D tensor calls a different (faster) cublasLt
151 return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
152 return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)
322 @register_lowering(aten.addmm, type_promotion_kind=None)
348 return autotune_select_algorithm("addmm", choices, [inp, mat1, mat2], layout)
369 # unexpand inp to make sure fused addmm from cublasLt is used
391 # in the linear GEMM epilogue used by addmm.
444 # unexpand inp to make sure fused addmm from cublasLt is used
453 "addmm", choices, [inp_expanded, mat1, mat2], layout
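
The "unexpand inp" comments above exist because addmm broadcasts its first argument: keeping the bias 1D lets the lowering hit cuBLASLt's fused bias epilogue instead of materializing an expanded bias. The two forms are numerically identical:

    import torch

    bias = torch.randn(8)                       # 1D bias: eligible for the fast path
    mat1, mat2 = torch.randn(4, 6), torch.randn(6, 8)
    assert torch.allclose(
        torch.addmm(bias, mat1, mat2),          # broadcasts bias to (4, 8)
        torch.addmm(bias.expand(4, 8), mat1, mat2),
    )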
/external/pytorch/aten/src/ATen/test/
broadcast_test.cpp
146 ASSERT_TRUE(a.addmm(b, c).equal(a.expand({5, 7}).addmm(b, c))); in TestExplicitDimBasic()
155 ASSERT_TRUE(aScalar.addmm(b, c).equal(aScalar.expand({5, 7}).addmm(b, c))); in TestExplicitDimWithScalar()
164 ASSERT_ANY_THROW(a.addmm(b, c)); in TestExplicitDimWithMismatchedSizes()
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Lstm.cpp
9 #include <ATen/ops/addmm.h>
140 at::addmm(b_ii, x, w_ii.t()) + at::addmm(b_hi, h, w_hi.t())); in lstm_input()
142 at::addmm(b_if, x, w_if.t()) + at::addmm(b_hf, h, w_hf.t())); in lstm_input()
144 at::tanh(at::addmm(b_ig, x, w_ig.t()) + at::addmm(b_hg, h, w_hg.t())); in lstm_input()
146 at::addmm(b_io, x, w_io.t()) + at::addmm(b_ho, h, w_ho.t())); in lstm_input()
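
Each Vulkan LSTM gate is the sum of an input projection and a hidden projection, both expressed as addmm. A Python sketch of the input gate, with illustrative sizes (input 8, hidden 16):

    import torch

    x, h = torch.randn(1, 8), torch.randn(1, 16)
    w_ii, w_hi = torch.randn(16, 8), torch.randn(16, 16)
    b_ii, b_hi = torch.randn(16), torch.randn(16)
    i_gate = torch.sigmoid(
        torch.addmm(b_ii, x, w_ii.t()) + torch.addmm(b_hi, h, w_hi.t())
    )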
Gru.cpp
9 #include <ATen/ops/addmm.h>
111 at::addmm(b_ir, x, w_ir.t()) + at::addmm(b_hr, h, w_hr.t())); in gru_input()
113 at::addmm(b_iz, x, w_iz.t()) + at::addmm(b_hz, h, w_hz.t())); in gru_input()
115 at::addmm(b_in, x, w_in.t()) + r * (at::addmm(b_hn, h, w_hn.t()))); in gru_input()
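
The GRU variant is the same two-addmm pattern per gate, except the candidate state scales the hidden projection by the reset gate r, matching line 115 above. A sketch with the same illustrative sizes:

    import torch

    x, h = torch.randn(1, 8), torch.randn(1, 16)
    w_in, w_hn = torch.randn(16, 8), torch.randn(16, 16)
    b_in, b_hn = torch.randn(16), torch.randn(16)
    r = torch.rand(1, 16)  # reset gate output, computed from its own addmm pair
    n = torch.tanh(torch.addmm(b_in, x, w_in.t()) + r * torch.addmm(b_hn, h, w_hn.t()))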
/external/executorch/backends/transforms/
addmm_mm_to_linear.py
104 Replace calls to addmm/mm with linear node
116 node.target == ops.aten.mm.default or node.target == ops.aten.addmm.default
119 if node.target == ops.aten.addmm.default:
125 # Skip this node as it appears to be a standalone `addmm`
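
The pass is sound because mm/addmm on a transposed weight compute exactly linear; the identities it relies on:

    import torch
    import torch.nn.functional as F

    x, w, b = torch.randn(4, 8), torch.randn(16, 8), torch.randn(16)
    assert torch.allclose(torch.mm(x, w.t()), F.linear(x, w))
    assert torch.allclose(torch.addmm(b, x, w.t()), F.linear(x, w, b))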
/external/pytorch/torch/_inductor/fx_passes/
freezing_patterns.py
188 aten.addmm(b1, inp, w1),
189 aten.addmm(b2, inp, w2),
190 aten.addmm(b3, inp, w3),
196 return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1)
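
This freezing pattern folds three addmms sharing the same input into one GEMM over concatenated weights and biases, then chunks the result back apart. The rewrite is numerically equivalent up to GEMM rounding:

    import torch

    inp = torch.randn(4, 8)
    ws = [torch.randn(8, 5) for _ in range(3)]
    bs = [torch.randn(5) for _ in range(3)]
    fused = torch.addmm(torch.cat(bs), inp, torch.cat(ws, dim=1)).chunk(3, dim=1)
    for out, w, b in zip(fused, ws, bs):
        assert torch.allclose(out, torch.addmm(b, inp, w), atol=1e-6)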
/external/executorch/backends/xnnpack/partition/
configs.py
65 exir_ops.edge.aten.addmm.default, # TODO(T163877189) add constraint for addmm
101 exir_ops.edge.aten.addmm.default, # TODO(T163877189) add constraint for addmm
/external/pytorch/test/export/
test_experimental.py
218 addmm = torch.ops.aten.addmm.default(p_linear_bias, view, permute); p_linear_bias = permute = None
219 view_1 = torch.ops.aten.view.default(addmm, [3]); addmm = None
267 addmm = torch.ops.aten.addmm.default(p_linear_bias, view, permute); p_linear_bias = permute = None
268 view_1 = torch.ops.aten.view.default(addmm, [3]); addmm = None
/external/pytorch/test/dynamo/
test_debug_utils.py
98 addmm: "f32[6144, 1001]" = torch.ops.aten.addmm.default(
102 amax: "f32[6144, 1]" = torch.ops.aten.amax.default(addmm, [-1], True)
103 sub: "f32[6144, 1001]" = torch.ops.aten.sub.Tensor(addmm, amax)
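
The amax/sub pair following addmm in this captured graph is the standard max-shift for numerical stability in (log-)softmax; softmax is invariant under that shift:

    import torch

    logits = torch.randn(4, 10)
    shifted = logits - logits.amax(dim=-1, keepdim=True)
    assert torch.allclose(shifted.softmax(-1), logits.softmax(-1))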
/external/pytorch/benchmarks/operator_benchmark/pt/
add_test.py
56 """Microbenchmark for addmm operator."""
66 self.set_module_name("addmm")
69 return torch.addmm(input_one, mat1, mat2)
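
The operator_benchmark harness above lives in the PyTorch benchmarks tree; a self-contained equivalent using torch.utils.benchmark, with illustrative sizes:

    import torch
    from torch.utils import benchmark

    m, k, n = 64, 64, 64
    timer = benchmark.Timer(
        stmt="torch.addmm(bias, mat1, mat2)",
        globals={
            "torch": torch,
            "bias": torch.randn(m, n),
            "mat1": torch.randn(m, k),
            "mat2": torch.randn(k, n),
        },
    )
    print(timer.timeit(100))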
/external/executorch/backends/example/example_operators/
adaptive_avg_pool2d.py
23 addmm = torch.ops.aten.addmm.default(fn_bias, arg2_1, permute_copy); fn_bias = arg2_1 = permute_c…
dropout.py
23 addmm = torch.ops.aten.addmm.default(fn_bias, arg2_1, permute_copy); fn_bias = arg2_1 = permute_c…
/external/executorch/exir/dialects/edge/op/test/
test_api.py
20 aten.addmm.out: aten.addmm.default,
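
This mapping registers aten.addmm.out as the out-variant of aten.addmm.default; both overloads compute the same value:

    import torch

    b, m1, m2 = torch.randn(2, 3), torch.randn(2, 4), torch.randn(4, 3)
    out = torch.empty(2, 3)
    torch.addmm(b, m1, m2, out=out)
    assert torch.allclose(out, torch.addmm(b, m1, m2))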
