| /external/pytorch/aten/src/ATen/test/ |
| D | mobile_memory_cleanup.cpp |
|     16  …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,…  in TEST()
|     32  …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,…  in TEST()
|
| /external/pytorch/torch/csrc/jit/passes/utils/ |
| D | op_registry.cpp |
|     29  …_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilatio…  in nn_ops_first_input_preserving()
|     30  …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] …  in nn_ops_first_input_preserving()
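
The schema above is truncated by the search view; as a minimal sketch, assuming the current 13-argument overload (the `.deprecated` overload omits the trailing allow_tf32 flag), the op can be invoked directly from Python:

    import torch

    # Hedged sketch: the trailing booleans are (benchmark, deterministic,
    # cudnn_enabled, allow_tf32) in the current overload.
    x = torch.randn(1, 3, 8, 8)
    w = torch.randn(8, 3, 3, 3)  # out_channels, in_channels, kH, kW
    b = torch.randn(8)
    y = torch._convolution(x, w, b, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,
                           False, False, True, True)
    print(y.shape)  # torch.Size([1, 8, 6, 6])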
|
| /external/pytorch/test/ |
| D | test_jit_llga_fuser.py |
|     439  ["aten::_convolution", 'aten::sigmoid', 'aten::mul'],
|     440  ["aten::_convolution"]
|     443  self.assertFused(graph, ['aten::_convolution', silu_op])
|     516  self.assertFused(graph, ['aten::_convolution', "aten::clamp"])
|     538  self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm'])
|     561  self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
|     672  self.assertFused(graph, ['aten::_convolution'])
|     811  self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
|
| D | test_mkldnn_fusion.py |
|     95  conv_node_name = 'aten::_convolution' if trace else 'aten::conv2d'
|     170  self.assertGraphContains(graph, kind='aten::_convolution')
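
Line 95 hinges on a trace/script difference that is easy to reproduce; a minimal sketch, assuming a stock nn.Conv2d:

    import torch

    # Tracing records the low-level aten::_convolution node; scripting a
    # Conv2d keeps the surface-level aten::conv2d, which is what line 95
    # switches on.
    conv = torch.nn.Conv2d(3, 8, 3)
    x = torch.randn(1, 3, 8, 8)
    traced = torch.jit.trace(conv, x)
    print(any(n.kind() == "aten::_convolution" for n in traced.graph.nodes()))  # True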
|
| D | test_cpp_extensions_aot.py | 294 # needs more overrides) in _convolution.
|
| /external/pytorch/ios/TestApp/custom_build/ |
| D | mobilenetv2.yaml | 1 - aten::_convolution
|
| /external/pytorch/torch/csrc/jit/passes/ |
| D | fold_conv_bn.cpp |
|     49  // Only looks for _convolution pattern.  in replaceConvBiasWithGetAttr()
|     56  %conv_out = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation,  in replaceConvBiasWithGetAttr()
|     64  %conv_out = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation,  in replaceConvBiasWithGetAttr()
|     78  // And change _convolution to take the new value.  in replaceConvBiasWithGetAttr()
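
The pass above rewrites the conv bias ahead of conv-bn folding. A user-level sketch of the same folding effect, via torch.jit.freeze rather than this quantization pass itself:

    import torch

    # User-level sketch (not the quantization pass itself): freezing a
    # scripted eval-mode model folds batch_norm into the preceding conv,
    # so no aten::batch_norm node remains in the frozen graph.
    m = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3),
        torch.nn.BatchNorm2d(8),
    ).eval()
    frozen = torch.jit.freeze(torch.jit.script(m))
    print(frozen.graph)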
|
| D | graph_rewrite_helper.cpp |
|     61  %r = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation,  in replaceConvolutionWithAtenConv()
|     69  %r = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation,  in replaceConvolutionWithAtenConv()
|
| D | frozen_conv_add_relu_fusion_cuda.cpp | 117 // Convert _convolution and in-place operators for simpler replacement pattern in fuseFrozenConvAddReluImpl()
|
| D | tensorexpr_fuser.cpp |
|     82  …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] …  in supported_non_eltwise_set()
|     892  …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] …  in typesAreSupported()
|     1079  if (node->kind() == aten::_convolution && !tensorexpr::isConv2d(node)) {  in typesAreSupported()
|     1080  GRAPH_DEBUG("This aten::_convolution node is not a 2D conv");  in typesAreSupported()
|     1083  if (node->kind() == aten::_convolution || node->kind() == aten::conv2d) {  in typesAreSupported()
|
| D | xnnpack_rewrite.cpp |
|     71  // Replace _convolution with conv1d and conv2d  in transformConv1dToConv2d()
|     118  // Replace _convolution with conv2d  in insertPrePackedConv2dOp()
|
| /external/pytorch/torch/csrc/jit/codegen/onednn/ |
| D | register_interface.cpp | 11 case aten::_convolution: in canFuseNode()
|
| D | decompose_silu.cpp | 18 if (inputToSilu->kind() == aten::_convolution) { in shouldDecomposeSilu()
|
| D | graph_helper.cpp |
|     93  (nodeKind == Symbol::fromQualString("aten::_convolution")) ||  in createOperator()
|     478  (kindOfOp == aten::conv2d) || (kindOfOp == aten::_convolution) ||  in isBetterSuitedForLLGA()
|
| /external/pytorch/aten/src/ATen/ |
| D | autocast_mode.cpp |
|     215  KERNEL_MPS2(_convolution, deprecated, lower_precision_fp)  in TORCH_LIBRARY_IMPL()
|     216  KERNEL_MPS(_convolution, lower_precision_fp)  in TORCH_LIBRARY_IMPL()
|     341  KERNEL_CPU(_convolution, deprecated, lower_precision_fp)  in TORCH_LIBRARY_IMPL()
|
| /external/pytorch/torch/testing/_internal/ |
| D | autocast_test_lists.py |
|     87  # deprecated _convolution
|     88  ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
|     90  # the current _convolution
|     91  ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
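
These entries exercise the lower_precision_fp policy registered in autocast_mode.cpp above; a minimal sketch of that behavior on CPU:

    import torch

    # Under autocast, ops registered with the lower_precision_fp policy
    # (including both _convolution overloads) cast their inputs down and
    # produce the lower-precision dtype.
    conv = torch.nn.Conv2d(3, 8, 3)
    x = torch.randn(1, 3, 8, 8)
    with torch.autocast("cpu", dtype=torch.bfloat16):
        y = conv(x)
    print(y.dtype)  # torch.bfloat16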
|
| /external/pytorch/test/cpp_extensions/ |
| D | maia_extension.cpp | 18 // This is a hack to workaround the shape checks in _convolution. in get_tensor()
|
| /external/pytorch/test/mobile/model_test/ |
| D | model_ops.yaml |
|     34  aten::_convolution: 27
|     35  aten::_convolution.deprecated: 3
|
| /external/pytorch/test/mobile/ |
| D | test_lite_script_module.py |
|     299  x3 = torch._convolution(
|     323  "aten::_convolution",
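
A minimal sketch of the export round trip this test exercises, using the same torch.jit.mobile helpers the test file imports; note a plain scripted Conv2d lists aten::conv2d, whereas the test's module, which calls torch._convolution directly, lists aten::_convolution instead:

    import torch
    from torch.jit.mobile import _export_operator_list, _load_for_lite_interpreter

    # Sketch of the round trip: export a scripted module for the lite
    # interpreter, reload it, and inspect the operators it carries.
    m = torch.jit.script(torch.nn.Conv2d(3, 8, 3))
    m._save_for_lite_interpreter("conv.ptl")
    mobile_m = _load_for_lite_interpreter("conv.ptl")
    print(_export_operator_list(mobile_m))  # includes aten::conv2d here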
|
| /external/pytorch/aten/src/ATen/native/mkldnn/xpu/ |
| D | Conv.cpp |
|     537  Tensor _convolution(  in _convolution() function
|     585  return _convolution(  in convolution_overrideable()
|
| /external/pytorch/test/cpp/jit/ |
| D | test_lite_interpreter_direct.cpp |
|     127  …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,…  in TEST()
|     564  …x3 = torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1, F…  in TEST()
|     574  "aten::_convolution",  in TEST()
|
| /external/pytorch/torch/_inductor/kernel/ |
| D | conv.py |
|     650  @register_lowering(aten._convolution)
|     651  def _convolution(  function
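
From the user side, this lowering is reached through torch.compile; a minimal sketch:

    import torch

    # torch.compile routes convolutions through the registered inductor
    # lowerings, including the one above for aten._convolution.
    conv = torch.nn.Conv2d(3, 8, 3)
    compiled = torch.compile(conv)
    out = compiled(torch.randn(1, 3, 16, 16))
    print(out.shape)  # torch.Size([1, 8, 14, 14])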
|
| /external/pytorch/test/jit/ |
| D | test_dtype_analysis.py | 70 "_convolution",
|
| /external/pytorch/torch/utils/ |
| D | flop_counter.py |
|     131  @register_flop_formula([aten.convolution, aten._convolution])
|     540  aten._convolution: conv_flop,
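
A minimal sketch of the counter these registrations feed:

    import torch
    from torch.utils.flop_counter import FlopCounterMode

    # aten._convolution dispatches to conv_flop via the registration above,
    # so convolution FLOPs show up in the total.
    conv = torch.nn.Conv2d(3, 8, 3)
    x = torch.randn(1, 3, 32, 32)
    with FlopCounterMode(display=False) as counter:
        conv(x)
    print(counter.get_total_flops())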
|
| /external/pytorch/torch/csrc/jit/tensorexpr/ |
| D | kernel.h | 26 // Returns true if the TE _convolution node is Conv2d.
|