
Searched full:max_pool2d (Results 1 – 25 of 141) sorted by relevance


/external/executorch/backends/arm/quantizer/quantization_annotation/
max_pool2d_annotator.py
24 @register_annotator("max_pool2d")
31 gm.graph, [torch.nn.MaxPool2d, torch.nn.functional.max_pool2d], filter_fn
40 if n.target == torch.ops.aten.max_pool2d.default:
44 ), "ArmQuantizer only works with torch.ops.aten.max_pool2d.default, "
/external/pytorch/aten/src/ATen/native/
DilatedMaxPool2d.cpp
27 "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") in TORCH_META_FUNC()
34 "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") in TORCH_META_FUNC()
40 "max_pool2d: padding must either be a single int, or a tuple of two ints"); in TORCH_META_FUNC()
45 "max_pool2d: dilation must be either a single int, or a tuple of two ints"); in TORCH_META_FUNC()
100 "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") in TORCH_META_FUNC()
107 "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") in TORCH_META_FUNC()
113 "max_pool2d: padding must either be a single int, or a tuple of two ints"); in TORCH_META_FUNC()
118 "max_pool2d: dilation must be either a single int, or a tuple of two ints"); in TORCH_META_FUNC()
Pooling.cpp
140 Tensor max_pool2d( in max_pool2d() function
158 return xnnpack::max_pool2d( in max_pool2d()
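
The argument checks in DilatedMaxPool2d.cpp mirror the documented Python contract: kernel_size, stride, padding, and dilation each accept a single int or a pair of ints, and stride defaults to kernel_size when omitted. A minimal sketch of those accepted forms (the input shape here is chosen purely for illustration):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 32, 32)  # NCHW input

    # kernel_size as a single int or a 2-tuple; stride defaults to kernel_size.
    a = F.max_pool2d(x, kernel_size=2)
    b = F.max_pool2d(x, kernel_size=(2, 2), stride=(2, 2))
    assert torch.equal(a, b)

    # padding and dilation accept the same int-or-pair forms.
    c = F.max_pool2d(x, kernel_size=3, stride=1, padding=1, dilation=1)
    print(a.shape, c.shape)  # torch.Size([1, 3, 16, 16]) torch.Size([1, 3, 32, 32])
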
/external/executorch/backends/xnnpack/test/ops/
maxpool2d.py
52 pass transforms it into aten.max_pool2d (if supported).
57 .check_count({"torch.ops.aten.max_pool2d.default": 1})
104 .check_count({"torch.ops.aten.max_pool2d.default": 1})
135 .check_count({"torch.ops.aten.max_pool2d.default": 1})
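
The XNNPACK tests above count torch.ops.aten.max_pool2d.default nodes in the captured graph. A rough equivalent using plain torch.export (not the ExecuTorch Tester API; the exact op name depends on which decompositions run) might look like:

    import torch

    class Model(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.max_pool2d(x, kernel_size=2)

    ep = torch.export.export(Model(), (torch.randn(1, 3, 8, 8),))
    # The pooling op may appear as aten.max_pool2d.default or, after core-ATen
    # decomposition, as aten.max_pool2d_with_indices.default.
    pool_nodes = [
        n for n in ep.graph_module.graph.nodes
        if n.op == "call_function"
        and n.target in (torch.ops.aten.max_pool2d.default,
                         torch.ops.aten.max_pool2d_with_indices.default)
    ]
    print(len(pool_nodes))  # the tests above assert exactly one such node
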
/external/executorch/backends/vulkan/runtime/graph/ops/glsl/
max_pool2d.yaml
7 max_pool2d:
16 - NAME: max_pool2d
/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Pool.cpp
62 // max_pool2d
84 std::string kernel_name("max_pool2d"); in add_max_pool2d_node()
116 void max_pool2d(ComputeGraph& graph, const std::vector<ValueRef>& args) { in max_pool2d() function
207 VK_REGISTER_OP(aten.max_pool2d_with_indices.default, max_pool2d);
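
The Vulkan backend registers its kernel under aten.max_pool2d_with_indices.default, the variant that also returns the argmax indices. From Python, that variant is reachable through return_indices=True; a small sketch:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 2, 8, 8)

    # Plain max_pool2d returns only the pooled values...
    values = F.max_pool2d(x, kernel_size=2)

    # ...while the *_with_indices form also returns the argmax positions,
    # which ops such as max_unpool2d consume later.
    values2, indices = F.max_pool2d(x, kernel_size=2, return_indices=True)
    assert torch.equal(values, values2)
    print(indices.shape)  # torch.Size([1, 2, 4, 4])
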
/external/pytorch/test/onnx/model_defs/
mnist.py
15 x = F.relu(F.max_pool2d(self.conv1(x), 2))
16 x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
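
The mnist.py lines show the usual conv -> max_pool2d -> relu pattern. A self-contained sketch of that forward pass (the layer sizes here are assumptions for illustration, not copied from the file):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SmallNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
            self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
            self.conv2_drop = nn.Dropout2d()

        def forward(self, x):
            # Each conv is followed by a 2x2 max pool and a ReLU, as in mnist.py.
            x = F.relu(F.max_pool2d(self.conv1(x), 2))
            x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
            return x

    print(SmallNet()(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 20, 4, 4])
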
/external/pytorch/aten/src/ATen/native/metal/ops/
MetalPooling.mm
17 static Tensor max_pool2d(
105 m.impl(TORCH_SELECTIVE_NAME("aten::max_pool2d"), TORCH_FN(max_pool2d));
/external/executorch/backends/arm/operators/
op_max_pool2d.py
26 target = "aten.max_pool2d.default"
74 TosaOp.Op().MAX_POOL2D,
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Pool.cpp
246 Tensor max_pool2d( in max_pool2d() function
279 VK_KERNEL(max_pool2d)); in max_pool2d()
290 m.impl(TORCH_SELECTIVE_NAME("aten::max_pool2d"), TORCH_FN(max_pool2d)); in TORCH_LIBRARY_IMPL()
/external/pytorch/torch/_inductor/
quantized_lowerings.py
34 quantized.max_pool2d,
40 lowering.make_fallback(quantized.max_pool2d)
/external/pytorch/benchmarks/tensorexpr/
pt_engine.py
66 def max_pool2d(self, data, kernel_size, stride=1): member in TorchTensorEngine
67 return torch.nn.functional.max_pool2d(data, kernel_size, stride=stride)
pooling.py
19 y = self.max_pool2d(self.data, self.kernel_size, stride=1)
/external/executorch/backends/arm/test/ops/
test_max_pool.py
70 .check(["torch.ops.aten.max_pool2d.default"])
98 .check_count({"torch.ops.aten.max_pool2d.default": 1})
128 .check_count({"torch.ops.aten.max_pool2d.default": 1})
/external/pytorch/test/cpp/api/
integration.cpp
265 x = torch::max_pool2d(conv1->forward(x), {2, 2}).relu(); in TEST_F()
268 x = torch::max_pool2d(x, {2, 2}).relu(); in TEST_F()
301 x = torch::max_pool2d(conv1->forward(x), {2, 2}).relu(); in TEST_F()
304 x = torch::max_pool2d(x, {2, 2}).relu(); in TEST_F()
/external/pytorch/torch/onnx/
symbolic_caffe2.py
24 "max_pool2d",
230 def max_pool2d( function
240 return opset9.max_pool2d( # type: ignore[attr-defined]
/external/pytorch/test/fx/
test_source_matcher_utils.py
324 gm.graph, ["conv2d", "relu", "max_pool2d"]
330 self.assertEqual(len(module_partitions["max_pool2d"]), 1)
352 module_partitions["max_pool2d"][0],
359 module_partitions["max_pool2d"][0],
/external/executorch/backends/arm/quantizer/
arm_quantizer.py
73 "max_pool2d": [[torch.nn.MaxPool2d], [F.max_pool2d]],
266 "max_pool2d",
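
The ArmQuantizer table pairs the annotator name with both the module class and its functional counterpart; the two spellings (and the underlying aten op) compute the same result, which a quick check illustrates:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 4, 16, 16)
    module_out = torch.nn.MaxPool2d(kernel_size=2, stride=2)(x)
    functional_out = F.max_pool2d(x, kernel_size=2, stride=2)
    aten_out = torch.ops.aten.max_pool2d.default(x, [2, 2], [2, 2])

    assert torch.equal(module_out, functional_out)
    assert torch.equal(module_out, aten_out)
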
/external/pytorch/torch/csrc/jit/codegen/onednn/
register_interface.cpp
27 case aten::max_pool2d: in canFuseNode()
/external/tensorflow/tensorflow/python/layers/
pooling.py
38 max_pool2d = max_pooling2d variable
/external/pytorch/torch/ao/pruning/_experimental/pruner/
base_structured_sparsifier.py
152 F.max_pool2d,
176 F.max_pool2d,
/external/pytorch/aten/src/ATen/native/xnnpack/
Engine.h
59 Tensor max_pool2d(
Shim.cpp
80 Tensor max_pool2d( in max_pool2d() function
MaxPooling.cpp
136 Tensor max_pool2d( in max_pool2d() function
147 // A call to max_pool2d must have been gated by a call to use_maxpool2d, so in max_pool2d()
/external/pytorch/torch/jit/
_shape_functions.py
276 def max_pool2d( function
286 ), "max_pool2d: kernel_size must either be a single int, or a tuple of two ints"
292 ), "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints"
303 ), "max_pool2d: padding must either be a single int, or a tuple of two ints"
309 ), "max_pool2d: dilation must be either a single int, or a tuple of two ints"
354 out = max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
1312 …"aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dila…
1313 max_pool2d,
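
The JIT shape function registered for the aten::max_pool2d schema computes output sizes with the standard pooling formula. A standalone sketch of that arithmetic (ignoring the ceil_mode corner case where the last window must start inside the input), checked against the real op:

    import math
    import torch
    import torch.nn.functional as F

    def max_pool2d_out_dim(size, kernel, stride, padding=0, dilation=1, ceil_mode=False):
        # floor/ceil((size + 2*padding - dilation*(kernel-1) - 1) / stride) + 1
        numerator = size + 2 * padding - dilation * (kernel - 1) - 1
        rounded = math.ceil(numerator / stride) if ceil_mode else numerator // stride
        return rounded + 1

    x = torch.randn(1, 3, 57, 64)
    out = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
    assert out.shape[-2] == max_pool2d_out_dim(57, kernel=3, stride=2, padding=1)
    assert out.shape[-1] == max_pool2d_out_dim(64, kernel=3, stride=2, padding=1)
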
