/external/executorch/backends/arm/test/ops/

test_hardtanh.py
    36: """Tests HardTanh Operator."""
    38: class HardTanh(torch.nn.Module):
    43: self.hardTanh = torch.nn.Hardtanh()
    46: return self.hardTanh(x)
    58: .check(["torch.ops.aten.hardtanh.default"])
    80: .check_count({"torch.ops.aten.hardtanh.default": 1})
    102: .check_count({"torch.ops.aten.hardtanh.default": 1})
    117: self._test_hardtanh_tosa_MI_pipeline(self.HardTanh(), (test_data,))
    121: self._test_hardtanh_tosa_BI_pipeline(self.HardTanh(), (test_data,))
    125: self._test_hardtanh_tosa_u55_BI_pipeline(self.HardTanh(), (test_data,))
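For reference, the module these Arm tests exercise is just a thin wrapper around torch.nn.Hardtanh, which clamps to [-1, 1] by default. A minimal sketch of that pattern (illustrative only, not the test file's exact code):

    import torch

    class HardTanh(torch.nn.Module):
        """Thin wrapper around torch.nn.Hardtanh (default bounds are [-1, 1])."""

        def __init__(self):
            super().__init__()
            self.hardtanh = torch.nn.Hardtanh()

        def forward(self, x):
            return self.hardtanh(x)

    x = torch.tensor([-3.0, -0.5, 0.5, 3.0])
    print(HardTanh()(x))  # values outside [-1, 1] are clamped to the bounds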
|
/external/executorch/backends/xnnpack/test/ops/

hardtanh.py
    14: class HardTanh(torch.nn.Module):
    22: z = torch.nn.Hardtanh(self.min_val, self.max_val)(y)
    29: Tester(self.HardTanh(), (input,))
    31: .check_count({"torch.ops.aten.hardtanh.default": 1})
    44: Tester(self.HardTanh(-2.0, 2.0), (input,))
    46: .check_count({"torch.ops.aten.hardtanh.default": 1})
    59: Tester(self.HardTanh(), (input,))
    64: # Expect three quantize ops - one for input, hardtanh, and add.
    66: torch.ops.aten.hardtanh.default: 1,
|
conv2d.py
    105: self.hardtanh = torch.nn.Hardtanh()
    118: y = self.hardtanh(y)
    121: y = self.hardtanh(y)
    264: This test makes sure that we can fuse batchnorm and hardtanh
    281: self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
    286: x = self.hardtanh(x)
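The conv2d tests pair a convolution with Hardtanh(0, 6), which is the clamp pattern the XNNPACK delegate can fold into the preceding op. A rough sketch of such a model (layer sizes and names are assumptions, not the test's actual class):

    import torch

    class ConvHardtanh(torch.nn.Module):
        """Conv2d followed by Hardtanh(0, 6), i.e. a fusable output clamp."""

        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
            self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)

        def forward(self, x):
            return self.hardtanh(self.conv(x))

    out = ConvHardtanh().eval()(torch.randn(1, 3, 16, 16))  # every element lies in [0, 6]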
|
/external/executorch/backends/xnnpack/test/passes/

test_activation_fusion.py
    145: activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
    152: activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
    162: activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
    173: activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
    184: activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
    195: activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
|
test_channels_last_tagged_reshape.py
    126: self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
    132: x = self.hardtanh(x)
    156: …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten.hardtanh.default](args = (…
|
/external/pytorch/aten/src/ATen/native/metal/ops/

MetalClamp.mm
    30: static Tensor hardtanh(
    58: return hardtanh(input, min.value(), max.value());
    63: m.impl(TORCH_SELECTIVE_NAME("aten::hardtanh"), TORCH_FN(hardtanh));
|
/external/pytorch/test/quantization/pt2e/

test_metadata_porting.py
    24: self.hardtanh = torch.nn.Hardtanh()
    31: x = self.hardtanh(x)
    145: conv2d -> avgpool -> hardtanh -> linear
    211: conv2d -> avgpool -> hardtanh -> linear
    254: conv2d -> avgpool -> hardtanh -> linear
    325: conv2d -> avgpool -> hardtanh -> linear
    388: conv2d -> avgpool -> hardtanh -> linear
|
test_duplicate_dq.py
    36: self.hardtanh = torch.nn.Hardtanh()
    43: x = self.hardtanh(x)
    124: conv2d -> avgpool -> hardtanh -> linear
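Both of these PT2E tests describe the same toy graph, conv2d -> avgpool -> hardtanh -> linear. A hedged sketch of a model with that shape (layer sizes are assumptions, not the tests' exact definitions):

    import torch

    class ToyPattern(torch.nn.Module):
        """conv2d -> avgpool -> hardtanh -> linear, matching the docstring pattern."""

        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 4, kernel_size=3)
            self.pool = torch.nn.AdaptiveAvgPool2d((1, 1))
            self.hardtanh = torch.nn.Hardtanh()
            self.linear = torch.nn.Linear(4, 10)

        def forward(self, x):
            x = self.hardtanh(self.pool(self.conv(x)))
            return self.linear(x.flatten(1))

    print(ToyPattern()(torch.randn(1, 3, 8, 8)).shape)  # torch.Size([1, 10])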
|
/external/executorch/backends/transforms/

fuse_conv_with_clamp.py
    19: …Some activations like ReLU and hardtanh can be fused with certain operators (e.g. convolution) pre…
    27: exir_ops.edge.aten.hardtanh.default,
    34: elif activation_node.target == exir_ops.edge.aten.hardtanh.default:
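The docstring states the fusion idea: a ReLU or Hardtanh that follows a supported op (e.g. convolution) is folded into that op's output clamp. A simplified illustration of the mapping (hypothetical helper, not the ExecuTorch pass itself):

    import torch

    def clamp_bounds_for(activation):
        """Map an activation module to the (min, max) output clamp it implies."""
        if isinstance(activation, torch.nn.ReLU):
            return 0.0, float("inf")  # ReLU is a clamp to [0, inf)
        if isinstance(activation, torch.nn.Hardtanh):
            return activation.min_val, activation.max_val
        raise ValueError(f"cannot fuse {type(activation).__name__}")

    print(clamp_bounds_for(torch.nn.Hardtanh(min_val=0, max_val=6)))  # (0, 6)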
|
/external/executorch/kernels/portable/cpu/

op_hardtanh.cpp (all matches inside hardtanh_out())
    49: ET_SWITCH_REAL_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
    51: ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "hardtanh.out", CTYPE_MIN, [&]() {
    58: ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "hardtanh.out", CTYPE_MAX, [&]() {
|
/external/executorch/backends/xnnpack/_passes/

fuse_activation_pass.py
    20: Some activations like ReLU and hardtanh can be fused with certain operators preceding it.
    36: exir_ops.edge.aten.hardtanh.default,
    53: elif activation_node.target == exir_ops.edge.aten.hardtanh.default:
|
/external/pytorch/test/jit/

test_optimize_for_mobile_preserve_debug_info.py
    236: linear_activation=F.hardtanh,
    237: linear_activation_kind="aten::hardtanh",
    247: conv2d_activation=F.hardtanh,
    248: conv2d_activation_kind="aten::hardtanh",
|
/external/executorch/backends/xnnpack/partition/

configs.py
    47: exir_ops.edge.aten.hardtanh.default,
    57: exir_ops.edge.aten.hardtanh.default,
    89: exir_ops.edge.aten.hardtanh.default,
    92: exir_ops.edge.aten.hardtanh.default,
|
/external/executorch/backends/qualcomm/tests/

models.py
    287: self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
    293: x3 = self.hardtanh(x2)
    540: class HardTanh(torch.nn.Module):
    543: self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
    546: return self.hardtanh(x)
    800: self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6.0)
    808: x5 = self.hardtanh(x4)
    900: self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
    926: z5 = self.hardtanh(z4)
|
/external/pytorch/docs/source/

mobile_optimizer.rst
    17: …Hardtanh fusion**: XNNPACK ops support fusion of clamping. That is clamping of output activation i…
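That doc line is about folding a trailing clamp-style activation (ReLU/Hardtanh) into the preceding XNNPACK op during mobile optimization. A rough usage sketch, assuming a scriptable model (the Sequential here is a placeholder; optimize_for_mobile is the documented entry point):

    import torch
    from torch.utils.mobile_optimizer import optimize_for_mobile

    model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3),
        torch.nn.Hardtanh(min_val=0.0, max_val=6.0),  # clamp that the XNNPACK rewrites can absorb
    ).eval()

    optimized = optimize_for_mobile(torch.jit.script(model))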
|
/external/pytorch/test/

test_xnnpack_integration.py
    1062: # Not inplace hardtanh fusion test.
    1064: "aten::hardtanh": 2,
    1071: M(F.hardtanh), pattern_count_map, data_shape, prepack_removal=True
    1075: pattern_count_map["aten::hardtanh"] = -1
    1077: M(F.hardtanh),
    1084: # Inplace hardtanh fusion test.
    1123: o = F.hardtanh(o)
    1126: # Unfusable hardtanh.
    1128: "aten::hardtanh": 1, # hardtanh cannot be.
    1159: o = F.hardtanh(o, min, max)
    [all …]
|
test_metal.py
    137: o = F.hardtanh(o)
    145: pattern_count_map["aten::hardtanh"] = 1
    152: pattern_count_map["aten::hardtanh"] = -1
|
test_vulkan.py
    140: o = F.hardtanh(o)
    148: pattern_count_map["aten::hardtanh"] = 1
    155: pattern_count_map["aten::hardtanh"] = -1
|
/external/executorch/backends/vulkan/runtime/graph/ops/impl/

UnaryOp.cpp
    135: DEFINE_CLAMP_FN(hardtanh);
    147: VK_REGISTER_OP(aten.hardtanh.default, hardtanh);
|
/external/pytorch/torch/ao/ns/fx/

mappings.py
    248: # F.hardtanh
    250: nn.Hardtanh,
    251: F.hardtanh,
    547: F.hardtanh,
    687: nn.Hardtanh,
|
/external/pytorch/torch/ao/quantization/pt2e/

graph_utils.py
    27: {torch.nn.Hardtanh, torch.nn.functional.hardtanh, torch.nn.functional.hardtanh_},
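graph_utils.py groups the module form, the functional form, and the in-place functional form of hardtanh into one shared pattern set. A quick check that the three compute the same clamp (F.hardtanh_ mutates its input; this snippet is illustrative, not the file's code):

    import torch
    import torch.nn.functional as F

    x = torch.tensor([-2.0, 0.3, 2.0])
    y = F.hardtanh(x)          # out-of-place functional form
    F.hardtanh_(x)             # in-place variant listed in the same set
    print(torch.equal(x, y), torch.equal(y, torch.nn.Hardtanh()(torch.tensor([-2.0, 0.3, 2.0]))))  # True True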
|
/external/pytorch/benchmarks/operator_benchmark/pt/

qactivation_test.py
    48: ("functional.hardtanh", qF.hardtanh),
|
/external/pytorch/torch/nn/modules/

__init__.py
    11: Hardtanh,
    228: "Hardtanh",
|
/external/pytorch/aten/src/ATen/native/mkldnn/

Utils.cpp
    106: "hardtanh is expected to have two scalar input: min_val and max_val");
    153: {"hardtanh", attr_func_hardtanh},
|
/external/pytorch/torch/_refs/nn/functional/

__init__.py
    39: "hardtanh",
    1004: @register_decomposition(aten.hardtanh)
    1012: def hardtanh(
    1019: Reference implementation of torch.nn.functional.hardtanh
    1024: raise RuntimeError("Bool inputs not supported for hardtanh")
    1032: "Cannot do hardtanh on an unsigned type with negative limits"
    1163: # It may be better to use clamp here, but we use hardtanh to replicate
    1165: return torch.nn.functional.hardtanh(a, 0, 6)
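The _refs entry is a reference implementation of hardtanh, and the last two matches show relu6 being written as hardtanh(a, 0, 6). A minimal functional sketch of that relationship using public torch ops (not the _refs internals):

    import torch

    def hardtanh_ref(a, min_val=-1.0, max_val=1.0):
        """Reference-style hardtanh: elementwise clamp of a to [min_val, max_val]."""
        return torch.clamp(a, min_val, max_val)

    x = torch.tensor([-8.0, 0.5, 7.0])
    print(hardtanh_ref(x))                            # clamps to [-1, 1]
    print(torch.nn.functional.hardtanh(x, 0.0, 6.0))  # relu6-style clamp to [0, 6]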
|