Searched full:_convolution (Results 1 – 25 of 49) sorted by relevance

/external/pytorch/aten/src/ATen/test/
mobile_memory_cleanup.cpp 16 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
32 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
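Both hits above call torch._convolution directly and are truncated mid-argument-list. As a rough sketch of what a complete call looks like (the trailing flags below follow the current 13-argument aten::_convolution schema and are an assumption, not recovered from the elided text):

    import torch

    input = torch.randn(1, 3, 8, 8)
    weight = torch.randn(4, 3, 3, 3)  # out_channels, in_channels, kH, kW
    bias = torch.randn(4)
    out = torch._convolution(
        input, weight, bias,
        [1, 1],   # stride
        [0, 0],   # padding
        [1, 1],   # dilation
        False,    # transposed
        [0, 0],   # output_padding
        1,        # groups
        False,    # benchmark      (assumed values for the truncated tail)
        False,    # deterministic
        True,     # cudnn_enabled
        True,     # allow_tf32
    )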
/external/pytorch/torch/csrc/jit/passes/utils/
op_registry.cpp 29 …_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilatio… in nn_ops_first_input_preserving()
30 …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] … in nn_ops_first_input_preserving()
/external/pytorch/test/
test_jit_llga_fuser.py 439 ["aten::_convolution", 'aten::sigmoid', 'aten::mul'],
440 ["aten::_convolution"]
443 self.assertFused(graph, ['aten::_convolution', silu_op])
516 self.assertFused(graph, ['aten::_convolution', "aten::clamp"])
538 self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm'])
561 self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
672 self.assertFused(graph, ['aten::_convolution'])
811 self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
test_mkldnn_fusion.py 95 conv_node_name = 'aten::_convolution' if trace else 'aten::conv2d'
170 self.assertGraphContains(graph, kind='aten::_convolution')
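The first hit picks the expected node kind by capture mode: tracing records the dispatcher-level aten::_convolution call, while scripting keeps the Python-level aten::conv2d op. A minimal sketch of that difference:

    import torch

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 4, 3)

        def forward(self, x):
            return self.conv(x)

    m, x = M().eval(), torch.randn(1, 3, 8, 8)
    print(torch.jit.trace(m, x).graph)  # contains aten::_convolution
    print(torch.jit.script(m).graph)    # contains aten::conv2d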
test_cpp_extensions_aot.py 294 # needs more overrides) in _convolution.
/external/pytorch/ios/TestApp/custom_build/
mobilenetv2.yaml 1 - aten::_convolution
/external/pytorch/torch/csrc/jit/passes/
fold_conv_bn.cpp 49 // Only looks for _convolution pattern. in replaceConvBiasWithGetAttr()
56 %conv_out = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvBiasWithGetAttr()
64 %conv_out = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvBiasWithGetAttr()
78 // And change _convolution to take the new value. in replaceConvBiasWithGetAttr()
graph_rewrite_helper.cpp 61 %r = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvolutionWithAtenConv()
69 %r = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvolutionWithAtenConv()
frozen_conv_add_relu_fusion_cuda.cpp 117 // Convert _convolution and in-place operators for simpler replacement pattern in fuseFrozenConvAddReluImpl()
tensorexpr_fuser.cpp 82 …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] … in supported_non_eltwise_set()
892 …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] … in typesAreSupported()
1079 if (node->kind() == aten::_convolution && !tensorexpr::isConv2d(node)) { in typesAreSupported()
1080 GRAPH_DEBUG("This aten::_convolution node is not a 2D conv"); in typesAreSupported()
1083 if (node->kind() == aten::_convolution || node->kind() == aten::conv2d) { in typesAreSupported()
xnnpack_rewrite.cpp 71 // Replace _convolution with conv1d and conv2d in transformConv1dToConv2d()
118 // Replace _convolution with conv2d in insertPrePackedConv2dOp()
/external/pytorch/torch/csrc/jit/codegen/onednn/
register_interface.cpp 11 case aten::_convolution: in canFuseNode()
decompose_silu.cpp 18 if (inputToSilu->kind() == aten::_convolution) { in shouldDecomposeSilu()
graph_helper.cpp 93 (nodeKind == Symbol::fromQualString("aten::_convolution")) || in createOperator()
478 (kindOfOp == aten::conv2d) || (kindOfOp == aten::_convolution) || in isBetterSuitedForLLGA()
/external/pytorch/aten/src/ATen/
autocast_mode.cpp 215 KERNEL_MPS2(_convolution, deprecated, lower_precision_fp) in TORCH_LIBRARY_IMPL()
216 KERNEL_MPS(_convolution, lower_precision_fp) in TORCH_LIBRARY_IMPL()
341 KERNEL_CPU(_convolution, deprecated, lower_precision_fp) in TORCH_LIBRARY_IMPL()
/external/pytorch/torch/testing/_internal/
autocast_test_lists.py 87 # deprecated _convolution
88 ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
90 # the current _convolution
91 ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
/external/pytorch/test/cpp_extensions/
maia_extension.cpp 18 // This is a hack to workaround the shape checks in _convolution. in get_tensor()
/external/pytorch/test/mobile/model_test/
model_ops.yaml 34 aten::_convolution: 27
35 aten::_convolution.deprecated: 3
/external/pytorch/test/mobile/
test_lite_script_module.py 299 x3 = torch._convolution(
323 "aten::_convolution",
/external/pytorch/aten/src/ATen/native/mkldnn/xpu/
Conv.cpp 537 Tensor _convolution( in _convolution() function
585 return _convolution( in convolution_overrideable()
/external/pytorch/test/cpp/jit/
test_lite_interpreter_direct.cpp 127 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
564 …x3 = torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1, F… in TEST()
574 "aten::_convolution", in TEST()
/external/pytorch/torch/_inductor/kernel/
conv.py 650 @register_lowering(aten._convolution)
651 def _convolution( function
/external/pytorch/test/jit/
test_dtype_analysis.py 70 "_convolution",
/external/pytorch/torch/utils/
flop_counter.py 131 @register_flop_formula([aten.convolution, aten._convolution])
540 aten._convolution: conv_flop,
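Both hits wire aten.convolution and aten._convolution to the same conv_flop formula, which is what FlopCounterMode uses to attribute convolution FLOPs. A minimal usage sketch (assuming the public torch.utils.flop_counter API):

    import torch
    from torch.utils.flop_counter import FlopCounterMode

    conv = torch.nn.Conv2d(3, 4, 3)
    x = torch.randn(1, 3, 8, 8)
    with FlopCounterMode(display=False) as counter:
        conv(x)
    print(counter.get_total_flops())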
/external/pytorch/torch/csrc/jit/tensorexpr/
kernel.h 26 // Returns true if the TE _convolution node is Conv2d.
