/external/pytorch/torch/ao/quantization/backend_config/ |
D | _qnnpack_pt2e.py |
  36  # addmm
  43  # input - addmm
  46  # input - observer - addmm
  49  # input - observer - addmm
  52  # addmm, bias, act, weight = node_pattern
  53  # return addmm
  56  …# BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.at…
  62  BackendPatternConfig(torch.ops.aten.addmm.default)
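The _qnnpack_pt2e entries above register the decomposed aten.addmm op with the PT2E quantization backend config. A minimal sketch of what such a registration can look like, assuming the public BackendPatternConfig/DTypeConfig API from torch.ao.quantization.backend_config; the dtype choices and the index mapping are illustrative (addmm is called as addmm(bias, input, weight), so bias is arg 0 and weight is arg 2), and _set_input_type_to_index is a private helper whose exact name should be treated as an assumption:

    import torch
    from torch.ao.quantization.backend_config import BackendPatternConfig, DTypeConfig

    # Illustrative dtype combination: int8 activations/weights, float bias.
    addmm_dtype_config = DTypeConfig(
        input_dtype=torch.quint8,
        output_dtype=torch.quint8,
        weight_dtype=torch.qint8,
        bias_dtype=torch.float,
    )

    # aten.addmm.default is addmm(bias, input, weight): tell the quantization
    # workflow which positional args carry the weight and the bias.
    addmm_config = (
        BackendPatternConfig(torch.ops.aten.addmm.default)
        .set_dtype_configs([addmm_dtype_config])
        ._set_input_type_to_index({"weight": 2, "bias": 0})
    )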
|
/external/executorch/backends/xnnpack/partition/config/ |
D | gemm_configs.py |
  48   GEMM-like ops like Convolution, Addmm, Linear, mostly behave in the same way, in which we
  323  We will handle the legacy form of addmm partitioning which will include
  327  target_name = "addmm.default"
  363  # if addmm belongs to linear src partition, then partition the
  375  from the linear src partition, and plug those in as the addmm node's args. We also
  376  take the users of the src partitions output node as the addmm node's users. Finally
  378  getting the deps, we return the addmm nodes users and args back.
  393  # map addmm's args to the source partition's inputs
  411  # map addmm's args to the source partition linear's inputs and users
  416  # Reset addmm node back to old args and users
|
/external/executorch/backends/qualcomm/_passes/ |
D | convert_to_linear.py |
  29   Handle missing quantization tag for addmm op after decomposing
  37   addmm = exir_ops.edge.aten.addmm.default variable in ConvertToLinear
  42   {view_copy: 2, permute_copy: 1, addmm: 1},
  43   {permute_copy: 1, addmm: 1},
  167  addmm_node = [n for n in partitioned_nodes if n.target == self.addmm][0]
  168  # weight -> permute -> input of addmm
|
/external/executorch/backends/apple/mps/runtime/operations/ |
D | LinearAlgebra.mm |
  54  … name:@"addmm/matmul"];
  63  name:@"addmm/alpha*matmul"];
  73  name:@"addmm/beta*bias"];
  78  … name:@"addmm/beta*bias*alpha*matmul"];
|
/external/pytorch/tools/autograd/ |
D | deprecated.yaml |
  49  - name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
  50  aten: addmm(self, mat1, mat2, beta, alpha)
  55  - name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2, *, Tensor(a!) out) …
  58  - name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
  59  aten: addmm(self, mat1, mat2, beta, 1)
  64  - name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
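These deprecated.yaml entries remap the legacy call forms, where beta and alpha were leading positional scalars, onto the current signature torch.addmm(input, mat1, mat2, *, beta=1, alpha=1), which computes beta * input + alpha * (mat1 @ mat2). A small sketch of the current form, with illustrative shapes:

    import torch

    bias = torch.randn(2, 4)
    m1 = torch.randn(2, 3)
    m2 = torch.randn(3, 4)

    # current signature: addmm(input, mat1, mat2, *, beta=1, alpha=1)
    out = torch.addmm(bias, m1, m2, beta=0.5, alpha=2.0)

    # equivalent explicit expression
    assert torch.allclose(out, 0.5 * bias + 2.0 * (m1 @ m2))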
|
/external/pytorch/test/distributed/_tensor/ |
D | test_matrix_ops.py |
  40  dist_res = torch.addmm(input, mat1, mat2)
  41  local_res = torch.addmm(input_tensor, tensor_to_shard, tensor_to_replicate)
  57  dist_res = torch.addmm(inp, mat1, mat2)
  58  local_res = torch.addmm(input_tensor, tensor_to_shard, tensor_to_replicate)
  75  local_res = torch.addmm(input_tensor, tensor_to_shard1, tensor_to_shard0)
  76  dist_res = torch.addmm(input, mat1, mat2)
  78  # test if addmm output is a partial
|
/external/pytorch/test/inductor/ |
D | test_pad_mm.py |
  177  def addmm(x, a, b): function
  178  return torch.addmm(x, a, b)
  183  self.assertEqual(torch.compile(addmm)(x, a, b), addmm(x, a, b))
  284  return torch.addmm(a, b, c)
  313  return torch.addmm(a, b, c)
  348  return torch.ops.aten.addmm(input, x, y)
  358  out_eager = torch.ops.aten.addmm(*inps)
  368  out_eager = torch.ops.aten.addmm(*inps)
|
D | test_pattern_matcher.py |
  720   torch.addmm(a, b, c),
  721   torch.addmm(b, c, a),
  722   torch.addmm(c, a, b),
  1123  return torch.ops.aten.addmm(inp, a, b)
  1126  FileCheck().check("extern_kernels.addmm(").run(code[0])
  1130  return torch.nn.functional.gelu(torch.ops.aten.addmm(inp, a, b))
  1133  FileCheck().check_not("extern_kernels.addmm(").run(code[0])
  1138  torch.ops.aten.addmm(inp, a, b).unsqueeze(0)
  1143  FileCheck().check_not("extern_kernels.addmm(").run(code[0])
  1201  return torch.ops.aten.addmm(inp, a, b)
  [all …]
|
D | test_max_autotune.py |
  299  Make sure autotuning addmm in sub processes work without crashes.
  304  def addmm(x, a, b): function
  305  return torch.addmm(x, a, b)
  311  Y_compiled = torch.compile(addmm, dynamic=dynamic)(x, a, b)
  312  Y = addmm(x, a, b)
  318  Make sure autotuning addmm with zero-size input works without crashes.
  321  def addmm(x, a, b): function
  322  return torch.addmm(x, a, b)
  328  torch.compile(addmm, dynamic=dynamic)(x, a, b)
  422  torch.addmm(a, b, c),
  [all …]
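The tests above autotune addmm lowerings in subprocesses. From the user side, that tuning is requested through torch.compile's max-autotune mode; a minimal sketch, where the mode name is the public torch.compile option and the shapes are illustrative:

    import torch

    def addmm(x, a, b):
        return torch.addmm(x, a, b)

    x = torch.randn(100)
    a = torch.randn(100, 10)
    b = torch.randn(10, 100)

    # ask inductor to benchmark candidate GEMM kernels instead of picking a default
    compiled = torch.compile(addmm, mode="max-autotune")
    assert torch.allclose(compiled(x, a, b), addmm(x, a, b), atol=1e-4)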
|
D | test_cutlass_backend.py |
  454  Make sure autotuning addmm in sub processes work without crashes.
  462  def addmm(x, a, b, alpha, beta): function
  463  return torch.addmm(x, a, b, alpha=alpha, beta=beta)
  471  y_expected = addmm(x, a, b, alpha, beta)
  473  compiled_fn = torch.compile(addmm, dynamic=dynamic)
  655  return torch.addmm(x, a, b, alpha=beta, beta=alpha)
  685  assert op_name == "addmm"
  702  def addmm(x, a, b, alpha, beta): function
  703  return torch.addmm(x, a, b, alpha=alpha, beta=beta)
  730  torch.compile(addmm, dynamic=False)(x, a, b, 1.0, 1.0)
  [all …]
|
D | test_triton_heuristics.py |
  57  addmm = torch.ops.aten.addmm.default(primals_2, view_1, permute_1)
  59  return addmm
|
D | test_ck_backend.py |
  248  def addmm(x, a, b, alpha, beta): function
  249  return torch.addmm(x, a, b, alpha=alpha, beta=beta)
  251  Y_compiled = addmm(x, a, b, alpha, beta)
  252  Y_eager = torch.addmm(x, a, b, alpha=alpha, beta=beta)
|
/external/pytorch/torch/_inductor/kernel/ |
D | mm.py |
  128  torch.addmm, "at::addmm_out", op_overload=aten.addmm.default
  146  Giving torch.addmm a 1D tensor calls a different (faster) cublasLt
  151  return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
  152  return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)
  322  @register_lowering(aten.addmm, type_promotion_kind=None)
  348  return autotune_select_algorithm("addmm", choices, [inp, mat1, mat2], layout)
  369  # unexpand inp to make sure fused addmm from cublasLt is used
  391  # in the linear GEMM epilogue used by addmm.
  444  # unexpand inp to make sure fused addmm from cublasLt is used
  453  "addmm", choices, [inp_expanded, mat1, mat2], layout
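The mm.py comments note that handing torch.addmm a 1D bias can hit the fused (faster) cublasLt epilogue, which is why the lowering "unexpands" a broadcast bias before calling the extern kernel. A sketch of the two equivalent call shapes, with illustrative sizes; the fused path only applies on CUDA, but the results match either way:

    import torch

    x = torch.randn(128, 64)
    w = torch.randn(64, 32)
    bias = torch.randn(32)

    # 1D bias: on CUDA this form is eligible for the fused cublasLt addmm epilogue
    y_1d_bias = torch.addmm(bias, x, w)

    # pre-expanded 2D bias: numerically the same, but loses the fused-epilogue opportunity
    y_2d_bias = torch.addmm(bias.expand(128, 32), x, w)

    assert torch.allclose(y_1d_bias, y_2d_bias)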
|
/external/pytorch/aten/src/ATen/test/ |
D | broadcast_test.cpp |
  146  ASSERT_TRUE(a.addmm(b, c).equal(a.expand({5, 7}).addmm(b, c))); in TestExplicitDimBasic()
  155  ASSERT_TRUE(aScalar.addmm(b, c).equal(aScalar.expand({5, 7}).addmm(b, c))); in TestExplicitDimWithScalar()
  164  ASSERT_ANY_THROW(a.addmm(b, c)); in TestExplicitDimWithMismatchedSizes()
|
/external/pytorch/aten/src/ATen/native/vulkan/ops/ |
D | Lstm.cpp |
  9    #include <ATen/ops/addmm.h>
  140  at::addmm(b_ii, x, w_ii.t()) + at::addmm(b_hi, h, w_hi.t())); in lstm_input()
  142  at::addmm(b_if, x, w_if.t()) + at::addmm(b_hf, h, w_hf.t())); in lstm_input()
  144  at::tanh(at::addmm(b_ig, x, w_ig.t()) + at::addmm(b_hg, h, w_hg.t())); in lstm_input()
  146  at::addmm(b_io, x, w_io.t()) + at::addmm(b_ho, h, w_ho.t())); in lstm_input()
|
D | Gru.cpp |
  9    #include <ATen/ops/addmm.h>
  111  at::addmm(b_ir, x, w_ir.t()) + at::addmm(b_hr, h, w_hr.t())); in gru_input()
  113  at::addmm(b_iz, x, w_iz.t()) + at::addmm(b_hz, h, w_hz.t())); in gru_input()
  115  at::addmm(b_in, x, w_in.t()) + r * (at::addmm(b_hn, h, w_hn.t()))); in gru_input()
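Both Vulkan kernels above assemble the recurrent gates from pairs of addmm calls: an input projection plus a hidden-state projection, each with its own bias. The same computation written in Python for one gate (the LSTM input gate), with illustrative sizes and the usual per-gate weight/bias naming:

    import torch

    batch, input_size, hidden = 2, 8, 4
    x = torch.randn(batch, input_size)       # current input
    h = torch.randn(batch, hidden)           # previous hidden state
    w_ii = torch.randn(hidden, input_size)   # input-to-input-gate weight
    w_hi = torch.randn(hidden, hidden)       # hidden-to-input-gate weight
    b_ii = torch.randn(hidden)
    b_hi = torch.randn(hidden)

    # i_t = sigmoid(x @ W_ii^T + b_ii + h @ W_hi^T + b_hi), expressed with addmm
    # just like the kernels above
    i = torch.sigmoid(torch.addmm(b_ii, x, w_ii.t()) + torch.addmm(b_hi, h, w_hi.t()))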
|
/external/executorch/backends/transforms/ |
D | addmm_mm_to_linear.py |
  104  Replace calls to addmm/mm with linear node
  116  node.target == ops.aten.mm.default or node.target == ops.aten.addmm.default
  119  if node.target == ops.aten.addmm.default:
  125  # Skip this node as it appears to be a standalone `addmm`
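The pass above rewrites addmm/mm calls into a single linear node. The rewrite rests on the identity that addmm over a transposed weight is exactly F.linear; a small sketch with illustrative shapes:

    import torch
    import torch.nn.functional as F

    x = torch.randn(5, 16)        # activations
    weight = torch.randn(32, 16)  # linear weight, shape (out_features, in_features)
    bias = torch.randn(32)

    # the decomposed pattern the pass matches: transposed weight fed into addmm
    decomposed = torch.addmm(bias, x, weight.t())

    # the single node it is replaced with
    fused = F.linear(x, weight, bias)

    assert torch.allclose(decomposed, fused, atol=1e-6)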
|
/external/pytorch/torch/_inductor/fx_passes/ |
D | freezing_patterns.py |
  188  aten.addmm(b1, inp, w1),
  189  aten.addmm(b2, inp, w2),
  190  aten.addmm(b3, inp, w3),
  196  return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1)
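This freezing pattern folds three addmm calls that share the same input into one GEMM over concatenated weights and biases, then chunks the result back apart. The equivalence it relies on, as a runnable sketch with illustrative shapes:

    import torch

    inp = torch.randn(4, 8)
    w1, w2, w3 = (torch.randn(8, 16) for _ in range(3))
    b1, b2, b3 = (torch.randn(16) for _ in range(3))

    # three separate GEMMs
    separate = [torch.addmm(b, inp, w) for b, w in ((b1, w1), (b2, w2), (b3, w3))]

    # one GEMM over concatenated weights/biases, split back into three outputs
    cat_w = torch.cat((w1, w2, w3), dim=1)
    cat_b = torch.cat((b1, b2, b3))
    fused = torch.addmm(cat_b, inp, cat_w).chunk(3, dim=1)

    for s, f in zip(separate, fused):
        assert torch.allclose(s, f, atol=1e-6)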
|
/external/executorch/backends/xnnpack/partition/ |
D | configs.py |
  65   exir_ops.edge.aten.addmm.default, # TODO(T163877189) add constraint for addmm
  101  exir_ops.edge.aten.addmm.default, # TODO(T163877189) add constraint for addmm
|
/external/pytorch/test/export/ |
D | test_experimental.py |
  218  …addmm = torch.ops.aten.addmm.default(p_linear_bias, view, permute); p_linear_bias = permute = None
  219  view_1 = torch.ops.aten.view.default(addmm, [3]); addmm = None
  267  …addmm = torch.ops.aten.addmm.default(p_linear_bias, view, permute); p_linear_bias = permute = None
  268  view_1 = torch.ops.aten.view.default(addmm, [3]); addmm = None
|
/external/pytorch/test/dynamo/ |
D | test_debug_utils.py |
  98   addmm: "f32[6144, 1001]" = torch.ops.aten.addmm.default(
  102  amax: "f32[6144, 1]" = torch.ops.aten.amax.default(addmm, [-1], True)
  103  sub: "f32[6144, 1001]" = torch.ops.aten.sub.Tensor(addmm, amax)
|
/external/pytorch/benchmarks/operator_benchmark/pt/ |
D | add_test.py |
  56  """Mircobenchmark for addmm operator."""
  66  self.set_module_name("addmm")
  69  return torch.addmm(input_one, mat1, mat2)
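The entry above is PyTorch's operator_benchmark harness for addmm. For a quick ad-hoc measurement outside that harness, torch.utils.benchmark gives a comparable number; a minimal sketch with illustrative shapes:

    import torch
    from torch.utils import benchmark

    bias = torch.randn(64, 256)
    m1 = torch.randn(64, 128)
    m2 = torch.randn(128, 256)

    timer = benchmark.Timer(
        stmt="torch.addmm(bias, m1, m2)",
        globals={"torch": torch, "bias": bias, "m1": m1, "m2": m2},
    )
    print(timer.timeit(100))  # runs the statement 100 times and prints the measured time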
|
/external/executorch/backends/example/example_operators/ |
D | adaptive_avg_pool2d.py | 23 …addmm = torch.ops.aten.addmm.default(fn_bias, arg2_1, permute_copy); fn_bias = arg2_1 = permute_c…
|
D | dropout.py | 23 …addmm = torch.ops.aten.addmm.default(fn_bias, arg2_1, permute_copy); fn_bias = arg2_1 = permute_c…
|
/external/executorch/exir/dialects/edge/op/test/ |
D | test_api.py | 20 aten.addmm.out: aten.addmm.default,
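This mapping ties the out-variant overload aten.addmm.out back to its functional counterpart aten.addmm.default. At the Python level the two correspond to calling torch.addmm with and without out=; a small sketch with illustrative shapes:

    import torch

    bias = torch.randn(3, 5)
    m1 = torch.randn(3, 4)
    m2 = torch.randn(4, 5)

    out = torch.empty(3, 5)
    torch.addmm(bias, m1, m2, out=out)                  # dispatches to aten.addmm.out
    assert torch.equal(out, torch.addmm(bias, m1, m2))  # aten.addmm.default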
|