
Searched full:hardtanh (Results 1 – 25 of 162) sorted by relevance


/external/executorch/backends/arm/test/ops/
test_hardtanh.py 36 """Tests HardTanh Operator."""
38 class HardTanh(torch.nn.Module): class in TestHardTanh
43 self.hardTanh = torch.nn.Hardtanh()
46 return self.hardTanh(x)
58 .check(["torch.ops.aten.hardtanh.default"])
80 .check_count({"torch.ops.aten.hardtanh.default": 1})
102 .check_count({"torch.ops.aten.hardtanh.default": 1})
117 self._test_hardtanh_tosa_MI_pipeline(self.HardTanh(), (test_data,))
121 self._test_hardtanh_tosa_BI_pipeline(self.HardTanh(), (test_data,))
125 self._test_hardtanh_tosa_u55_BI_pipeline(self.HardTanh(), (test_data,))
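For reference, the operator these backend tests exercise is just an elementwise clamp: torch.nn.Hardtanh bounds its input to [min_val, max_val], defaulting to [-1.0, 1.0]. A minimal sketch in plain PyTorch (not taken from the files above):

import torch

x = torch.tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
m = torch.nn.Hardtanh()                              # defaults: min_val=-1.0, max_val=1.0
print(m(x))                                          # -> [-1.0, -0.5, 0.0, 0.5, 1.0]
print(torch.nn.functional.hardtanh(x, -2.0, 2.0))    # same op through the functional API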
/external/executorch/backends/xnnpack/test/ops/
hardtanh.py 14 class HardTanh(torch.nn.Module): class in TestHardTanh
22 z = torch.nn.Hardtanh(self.min_val, self.max_val)(y)
29 Tester(self.HardTanh(), (input,))
31 .check_count({"torch.ops.aten.hardtanh.default": 1})
44 Tester(self.HardTanh(-2.0, 2.0), (input,))
46 .check_count({"torch.ops.aten.hardtanh.default": 1})
59 Tester(self.HardTanh(), (input,))
64 # Expect three quantize ops - one for input, hardtanh, and add.
66 torch.ops.aten.hardtanh.default: 1,
conv2d.py 105 self.hardtanh = torch.nn.Hardtanh()
118 y = self.hardtanh(y)
121 y = self.hardtanh(y)
264 This test makes sure that we can fuse batchnorm and hardtanh
281 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
286 x = self.hardtanh(x)
/external/executorch/backends/xnnpack/test/passes/
test_activation_fusion.py 145 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
152 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
162 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
173 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
184 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
195 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
test_channels_last_tagged_reshape.py 126 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
132 x = self.hardtanh(x)
156 …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten.hardtanh.default](args = (…
/external/pytorch/aten/src/ATen/native/metal/ops/
MetalClamp.mm 30 static Tensor hardtanh(
58 return hardtanh(input, min.value(), max.value());
63 m.impl(TORCH_SELECTIVE_NAME("aten::hardtanh"), TORCH_FN(hardtanh));
/external/pytorch/test/quantization/pt2e/
test_metadata_porting.py 24 self.hardtanh = torch.nn.Hardtanh()
31 x = self.hardtanh(x)
145 conv2d -> avgpool -> hardtanh -> linear
211 conv2d -> avgpool -> hardtanh -> linear
254 conv2d -> avgpool -> hardtanh -> linear
325 conv2d -> avgpool -> hardtanh -> linear
388 conv2d -> avgpool -> hardtanh -> linear
test_duplicate_dq.py 36 self.hardtanh = torch.nn.Hardtanh()
43 x = self.hardtanh(x)
124 conv2d -> avgpool -> hardtanh -> linear
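The pt2e tests above repeatedly describe the same toy graph, conv2d -> avgpool -> hardtanh -> linear. A rough illustration of that pattern (class name and shapes are made up here, not taken from the test files):

import torch

class ConvPoolHardtanhLinear(torch.nn.Module):  # illustrative only
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
        self.pool = torch.nn.AdaptiveAvgPool2d((1, 1))
        self.hardtanh = torch.nn.Hardtanh()
        self.linear = torch.nn.Linear(8, 10)

    def forward(self, x):
        x = self.pool(self.conv(x))
        x = self.hardtanh(x)
        return self.linear(x.flatten(1))

out = ConvPoolHardtanhLinear()(torch.randn(2, 3, 16, 16))  # shape (2, 10)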
/external/executorch/backends/transforms/
fuse_conv_with_clamp.py 19 …Some activations like ReLU and hardtanh can be fused with certain operators (e.g. convolution) pre…
27 exir_ops.edge.aten.hardtanh.default,
34 elif activation_node.target == exir_ops.edge.aten.hardtanh.default:
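The fusion these passes perform rests on a simple identity: a hardtanh applied to a convolution's output is just a clamp of that output, so its min_val/max_val can be folded into a fused conv+clamp op and the standalone hardtanh node dropped. A quick numeric check of that equivalence in plain PyTorch (not the pass itself):

import torch

conv = torch.nn.Conv2d(3, 8, kernel_size=3)
x = torch.randn(1, 3, 16, 16)
y_unfused = torch.nn.Hardtanh(min_val=-1.0, max_val=1.0)(conv(x))
y_fused = torch.clamp(conv(x), min=-1.0, max=1.0)    # what a fused conv+clamp computes
print(torch.allclose(y_unfused, y_fused))             # True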
/external/executorch/kernels/portable/cpu/
op_hardtanh.cpp 49 ET_SWITCH_REAL_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() { in hardtanh_out()
51 ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "hardtanh.out", CTYPE_MIN, [&]() { in hardtanh_out()
58 ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "hardtanh.out", CTYPE_MAX, [&]() { in hardtanh_out()
/external/executorch/backends/xnnpack/_passes/
fuse_activation_pass.py 20 Some activations like ReLU and hardtanh can be fused with certain operators preceding it.
36 exir_ops.edge.aten.hardtanh.default,
53 elif activation_node.target == exir_ops.edge.aten.hardtanh.default:
/external/pytorch/test/jit/
test_optimize_for_mobile_preserve_debug_info.py 236 linear_activation=F.hardtanh,
237 linear_activation_kind="aten::hardtanh",
247 conv2d_activation=F.hardtanh,
248 conv2d_activation_kind="aten::hardtanh",
/external/executorch/backends/xnnpack/partition/
configs.py 47 exir_ops.edge.aten.hardtanh.default,
57 exir_ops.edge.aten.hardtanh.default,
89 exir_ops.edge.aten.hardtanh.default,
92 exir_ops.edge.aten.hardtanh.default,
/external/executorch/backends/qualcomm/tests/
models.py 287 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
293 x3 = self.hardtanh(x2)
540 class HardTanh(torch.nn.Module): class
543 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
546 return self.hardtanh(x)
800 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6.0)
808 x5 = self.hardtanh(x4)
900 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
926 z5 = self.hardtanh(z4)
/external/pytorch/docs/source/
mobile_optimizer.rst 17 **Hardtanh fusion**: XNNPACK ops support fusion of clamping. That is clamping of output activation i…
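mobile_optimizer.rst is describing the optimize_for_mobile path, where XNNPACK's prepacked conv/linear ops carry an output clamp, so a trailing hardtanh can be absorbed into the preceding op instead of running as a separate kernel. A hedged sketch of that workflow (the model is made up; only the optimize_for_mobile call is the documented API):

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

class ConvHardtanh(torch.nn.Module):   # illustrative model, not from the docs
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
        self.act = torch.nn.Hardtanh(min_val=0.0, max_val=6.0)

    def forward(self, x):
        return self.act(self.conv(x))

scripted = torch.jit.script(ConvHardtanh().eval())
optimized = optimize_for_mobile(scripted)   # XNNPACK rewrites can fold the clamp
                                            # into the prepacked conv's output range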
/external/pytorch/test/
test_xnnpack_integration.py 1062 # Not inplace hardtanh fusion test.
1064 "aten::hardtanh": 2,
1071 M(F.hardtanh), pattern_count_map, data_shape, prepack_removal=True
1075 pattern_count_map["aten::hardtanh"] = -1
1077 M(F.hardtanh),
1084 # Inplace hardtanh fusion test.
1123 o = F.hardtanh(o)
1126 # Unfusable hardtanh.
1128 "aten::hardtanh": 1, # hardtanh cannot be.
1159 o = F.hardtanh(o, min, max)
[all …]
test_metal.py 137 o = F.hardtanh(o)
145 pattern_count_map["aten::hardtanh"] = 1
152 pattern_count_map["aten::hardtanh"] = -1
test_vulkan.py 140 o = F.hardtanh(o)
148 pattern_count_map["aten::hardtanh"] = 1
155 pattern_count_map["aten::hardtanh"] = -1
/external/executorch/backends/vulkan/runtime/graph/ops/impl/
UnaryOp.cpp 135 DEFINE_CLAMP_FN(hardtanh);
147 VK_REGISTER_OP(aten.hardtanh.default, hardtanh);
/external/pytorch/torch/ao/ns/fx/
mappings.py 248 # F.hardtanh
250 nn.Hardtanh,
251 F.hardtanh,
547 F.hardtanh,
687 nn.Hardtanh,
/external/pytorch/torch/ao/quantization/pt2e/
graph_utils.py 27 {torch.nn.Hardtanh, torch.nn.functional.hardtanh, torch.nn.functional.hardtanh_},
/external/pytorch/benchmarks/operator_benchmark/pt/
qactivation_test.py 48 ("functional.hardtanh", qF.hardtanh),
/external/pytorch/torch/nn/modules/
__init__.py 11 Hardtanh,
228 "Hardtanh",
/external/pytorch/aten/src/ATen/native/mkldnn/
Utils.cpp 106 "hardtanh is expected to have two scalar input: min_val and max_val"); in __anonb633e42f0202()
153 {"hardtanh", attr_func_hardtanh}, in fusion_unary_attr_map()
/external/pytorch/torch/_refs/nn/functional/
__init__.py 39 "hardtanh",
1004 @register_decomposition(aten.hardtanh)
1012 def hardtanh( function
1019 Reference implementation of torch.nn.functional.hardtanh
1024 raise RuntimeError("Bool inputs not supported for hardtanh")
1032 "Cannot do hardtanh on an unsigned type with negative limits"
1163 # It may be better to use clamp here, but we use hardtanh to replicate
1165 return torch.nn.functional.hardtanh(a, 0, 6)
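The _refs entry above spells out the relationship the reference implementations rely on: hardtanh is elementwise clamping, and relu6 is defined in terms of it with bounds [0, 6]. A small sanity check in standard PyTorch (not the _refs code itself):

import torch
import torch.nn.functional as F

x = torch.randn(8)
print(torch.allclose(F.hardtanh(x, -1.0, 1.0), torch.clamp(x, -1.0, 1.0)))  # True
print(torch.allclose(F.relu6(x), F.hardtanh(x, 0.0, 6.0)))                  # True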
