Searched full:leaky_relu (Results 1 – 25 of 187) sorted by relevance


/external/pytorch/benchmarks/static_runtime/
deep_wide_pt.cc 43 x = torch.leaky_relu(input, 0.1)
44 x = torch.leaky_relu(x, 0.1)
45 x = torch.leaky_relu(x, 0.1)
46 x = torch.leaky_relu(x, 0.1)
47 return torch.leaky_relu(x, 0.1)
52 x = torch.leaky_relu(input, neg_slope)
53 x = torch.leaky_relu(x, neg_slope)
54 x = torch.leaky_relu(x, neg_slope)
55 x = torch.leaky_relu(x, neg_slope)
56 return torch.leaky_relu(x, neg_slope)
[all …]
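
The matched lines above appear to come from a TorchScript source string used by the static-runtime benchmark: five chained torch.leaky_relu calls, once with a hard-coded 0.1 slope and once with the slope passed in. A minimal standalone sketch of the same pattern (the function name and the range(5) loop are illustrative, not taken from the benchmark file):

    import torch

    def leaky_relu_chain(x: torch.Tensor, neg_slope: float = 0.1) -> torch.Tensor:
        # Mirrors lines 43-47 / 52-56 above: five chained leaky_relu calls.
        for _ in range(5):
            x = torch.leaky_relu(x, neg_slope)
        return x

    # The benchmark presumably scripts such a function before measuring it.
    scripted = torch.jit.script(leaky_relu_chain)
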
/external/ComputeLibrary/examples/
graph_yolov3.cpp 95 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
109 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
123 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
137 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
151 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
166 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
189 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
206 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
220 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
234 …tionLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f)).set_name… in do_setup()
[all …]
/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
leaky-relu.c 145 pytorch_qnnp_operator_t leaky_relu, in pytorch_qnnp_setup_leaky_relu_nc_q8() argument
158 leaky_relu->batch_size = 0; in pytorch_qnnp_setup_leaky_relu_nc_q8()
162 leaky_relu->batch_size = batch_size; in pytorch_qnnp_setup_leaky_relu_nc_q8()
163 leaky_relu->input = input; in pytorch_qnnp_setup_leaky_relu_nc_q8()
164 leaky_relu->input_pixel_stride = input_stride; in pytorch_qnnp_setup_leaky_relu_nc_q8()
165 leaky_relu->output = output; in pytorch_qnnp_setup_leaky_relu_nc_q8()
166 leaky_relu->output_pixel_stride = output_stride; in pytorch_qnnp_setup_leaky_relu_nc_q8()
/external/executorch/backends/xnnpack/test/ops/
leaky_relu.py 26 return torch.nn.functional.leaky_relu(x)
32 .check_count({"torch.ops.aten.leaky_relu.default": 1})
60 .check_count({"torch.ops.aten.leaky_relu.default": 1})
82 "leaky_relu::default": 1,
102 The leaky_relu visitor has logic to handle the default slope, since it's apparently not
113 "leaky_relu::default": 1,
/external/pytorch/test/jit/
test_custom_operators.py 43 self.assertNotIn("leaky_relu", torch.ops._test.__dict__)
44 op = torch.ops._test.leaky_relu
46 self.assertIn("leaky_relu", torch.ops._test.__dict__)
47 op2 = torch.ops._test.leaky_relu
65 output = torch.ops._test.leaky_relu(torch.tensor([-1.0, 1.0]))
93 "Unknown keyword argument 'foo' for operator '_test::leaky_relu'",
96 torch.ops._test.leaky_relu(torch.ones(5), foo=torch.ones(5))
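
The test above exercises two properties of the torch.ops namespace: operator attributes are created lazily on first access (hence the assertNotIn/assertIn pair) and unknown keyword arguments are rejected with an error. The _test::leaky_relu operator only exists once the test extension is loaded; the builtin aten variant goes through the same machinery, as in this small illustrative call:

    import torch

    # aten::leaky_relu(Tensor self, Scalar negative_slope=0.01), resolved lazily
    # through the torch.ops namespace just like _test::leaky_relu above.
    out = torch.ops.aten.leaky_relu(torch.tensor([-1.0, 1.0]), 0.1)
    print(out)  # tensor([-0.1000,  1.0000])
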
/external/pytorch/aten/src/ATen/native/metal/ops/
MetalLeakyReLU.mm 26 [[MetalContext sharedInstance] specializedPipelineState:"leaky_relu"
50 static Tensor leaky_relu(const at::Tensor& input, const Scalar& negative_slope_val) {
61 [[MetalContext sharedInstance] specializedPipelineState:"leaky_relu"
86 m.impl(TORCH_SELECTIVE_NAME("aten::leaky_relu"), TORCH_FN(leaky_relu));
/external/pytorch/torch/ao/quantization/backend_config/
onednn.py 76 def _fuse_linear_bn_leaky_relu(is_qat, linear, bn, leaky_relu): argument
77 r"""Given the linear, bn and leaky_relu modules, fuses them and returns the fused module
83 leaky_relu: LeakyReLU instance that needs to be fused with the linear layer
92 linear.training == bn.training and bn.training == leaky_relu.training
97 f"Cannot fuse train modules: {(linear, bn, leaky_relu)}"
106 fm = fused_module(fused_linear, leaky_relu)
110 f"Cannot fuse eval modules: {(linear, bn, leaky_relu)}"
571 # Configs for linear + leaky_relu fusion
577 F.leaky_relu,
585 # Configs for linear module + batchnorm + leaky_relu
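
The docstring above describes fusing Linear + BatchNorm + LeakyReLU for the onednn backend config, with separate handling for training (QAT) and eval modules. In eval mode the batch-norm statistics can be folded into the linear weights and the LeakyReLU kept as the second stage; a rough sketch of that fold (the helper name and the plain nn.Sequential wrapper are illustrative, not the actual fused module type used by the backend):

    import torch
    import torch.nn as nn

    def fold_linear_bn_leaky_relu(linear, bn, leaky_relu):
        # Eval-mode fold only; the QAT path needs a dedicated fused module instead.
        assert not (linear.training or bn.training or leaky_relu.training)
        scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
        bias = linear.bias if linear.bias is not None else torch.zeros(linear.out_features)
        fused = nn.Linear(linear.in_features, linear.out_features)
        fused.weight = nn.Parameter(linear.weight * scale[:, None])
        fused.bias = nn.Parameter((bias - bn.running_mean) * scale + bn.bias)
        return nn.Sequential(fused, leaky_relu)
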
/external/pytorch/torch/ao/nn/intrinsic/quantized/modules/
linear_relu.py 100 leaky_relu = mod[1]
109 mod.in_features, mod.out_features, leaky_relu.negative_slope, dtype=dtype
119 leaky_relu = ref_mod[1]
121 linear.in_features, linear.out_features, leaky_relu.negative_slope
/external/XNNPACK/src/subgraph/
leaky-relu.c 45 node->params.leaky_relu.negative_slope, in create_leaky_relu_operator()
53 node->params.leaky_relu.negative_slope, in create_leaky_relu_operator()
61 node->params.leaky_relu.negative_slope, in create_leaky_relu_operator()
72 node->params.leaky_relu.negative_slope, in create_leaky_relu_operator()
274 node->params.leaky_relu.negative_slope = negative_slope; in xnn_define_leaky_relu()
/external/libopus/dnn/torch/osce/utils/layers/
td_shaper.py 125 alpha = F.leaky_relu(alpha, 0.2)
130 inno_alpha = F.leaky_relu(self.feature_alpha1b(f), 0.2)
134 inno_x = F.leaky_relu(self.feature_alpha1c(f), 0.2)
/external/pytorch/torch/ao/nn/intrinsic/modules/
fused.py 205 def __init__(self, linear, leaky_relu): argument
207 type(linear) == Linear and type(leaky_relu) == torch.nn.LeakyReLU
208 ), f"Incorrect types for input modules{type(linear)}{type(leaky_relu)}"
209 super().__init__(linear, leaky_relu)
/external/pytorch/torch/nn/
init.py 102 >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
121 elif nonlinearity == "leaky_relu":
460 nonlinearity: str = "leaky_relu",
478 used with ``'leaky_relu'``)
484 recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
525 nonlinearity: str = "leaky_relu",
543 used with ``'leaky_relu'``)
549 recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
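
The init.py matches above cover calculate_gain and the Kaiming initializers; the Kaiming functions default to nonlinearity='leaky_relu', with the negative slope supplied via the a argument (and as the extra parameter for calculate_gain). A short usage sketch of the documented calls:

    import torch
    import torch.nn as nn

    gain = nn.init.calculate_gain('leaky_relu', 0.2)  # sqrt(2 / (1 + 0.2**2)) ~ 1.3867
    w = torch.empty(128, 64)
    nn.init.kaiming_uniform_(w, a=0.2, nonlinearity='leaky_relu')
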
/external/executorch/kernels/portable/cpu/
op_leaky_relu.cpp 47 ET_SWITCH_FLOAT_TYPES(in_type, ctx, "leaky_relu.out", CTYPE, [&]() { in leaky_relu_out()
50 sc_type, ctx, "leaky_relu.out", CTYPE_MIN, [&]() { in leaky_relu_out()
/external/tensorflow/tensorflow/lite/testing/op_tests/
leaky_relu.py 15 """Test configs for leaky_relu."""
40 out = tf.nn.leaky_relu(input_tensor, alpha=parameters["alpha"])
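
The test config above sweeps tf.nn.leaky_relu over several alpha values; the op returns x for non-negative inputs and alpha * x otherwise. A minimal example outside the test harness:

    import tensorflow as tf

    x = tf.constant([-1.0, 0.0, 2.0])
    y = tf.nn.leaky_relu(x, alpha=0.2)  # -> [-0.2, 0.0, 2.0]
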
/external/executorch/backends/apple/mps/operators/
activation_ops.py 47 target = ["aten.relu.default", "aten.leaky_relu.default", "aten.gelu.default"]
53 exir_ops.edge.aten.leaky_relu.default: MPSLeakyReLU,
/external/executorch/backends/qualcomm/builders/
op_prelu.py 27 target = ["aten.leaky_relu.default", "aten.prelu.default"]
47 if node.target.__name__ == "aten.leaky_relu.default":
/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
relu_op_test.py 283 tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
321 *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
331 *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
341 y = nn_ops.leaky_relu(x)
359 y = nn_ops.leaky_relu(x)
374 return nn_ops.leaky_relu(x, 0.05)**2
384 nn_ops.leaky_relu(
390 nn_ops.leaky_relu(
/external/pytorch/benchmarks/operator_benchmark/pt/
qactivation_test.py 50 ("functional.leaky_relu", qF.leaky_relu),
/external/pytorch/torch/csrc/api/include/torch/nn/modules/container/
functional.h 34 /// you want to wrap `torch::leaky_relu`, which accepts a `slope` scalar as its
41 /// Functional(torch::leaky_relu, /*slope=*/0.5)
/external/pytorch/torch/ao/ns/fx/
mappings.py 219 # F.leaky_relu
221 F.leaky_relu,
498 F.leaky_relu,
526 toq.leaky_relu,
/external/pytorch/torch/distributed/_shard/sharded_tensor/_ops/
init.py 72 used with ``'leaky_relu'``)
78 recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
/external/executorch/backends/xnnpack/partition/
configs.py 64 exir_ops.edge.aten.leaky_relu.default,
100 exir_ops.edge.aten.leaky_relu.default,
/external/pytorch/aten/src/ATen/native/mkldnn/
Utils.cpp 93 "leaky_relu is expected to have one scalar input: negative_slope"); in __anonb633e42f0102()
152 {"leaky_relu", attr_func_leaky_relu}, in fusion_unary_attr_map()
/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/include/
pytorch_qnnpack.h 364 pytorch_qnnp_operator_t* leaky_relu);
367 pytorch_qnnp_operator_t leaky_relu,
/external/pytorch/test/nn/
test_init.py 73 for fn in ["sigmoid", "tanh", "relu", "leaky_relu"]:
81 elif fn == "leaky_relu": # sqrt(2 / 1 + slope^2))
88 gain = init.calculate_gain("leaky_relu", param)
101 init.calculate_gain("leaky_relu", param)
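
The comment on the matched line spells out the expected value: the leaky_relu gain is sqrt(2 / (1 + negative_slope**2)), which the test compares calculate_gain against for a range of slopes. A quick way to reproduce that check:

    import math
    from torch.nn import init

    for slope in (0.01, 0.1, 0.2):
        got = init.calculate_gain('leaky_relu', slope)
        assert math.isclose(got, math.sqrt(2.0 / (1 + slope ** 2)))
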
