/external/pytorch/test/onnx/ |
D | test_onnxscript_no_runtime.py |
      27  # 1. Register Selu onnxscript function as custom Op
      31  def Selu(X):
      43  return g.onnxscript_op(Selu, X).setType(X.type())
      46  symbolic_name="aten::selu",
      88  model_selu = torch.nn.SELU()
     106  self.assertEqual(selu_proto.functions[0].name, "Selu")
     115  self.selu = torch.nn.SELU()
     122  y = self.selu(x)
     135  def Selu(X):
     150  return g.onnxscript_op(Selu, X).setType(X.type())
     [all …]
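
A minimal sketch of the registration pattern this test exercises, assuming
onnxscript is installed. The constants are the standard SELU values from
Klambauer et al., 2017; this illustrates the idea, not the test's exact body:

    import torch
    import onnxscript
    from onnxscript.onnx_opset import opset15 as op

    custom_opset = onnxscript.values.Opset(domain="onnx-script", version=1)

    @onnxscript.script(custom_opset)
    def Selu(X):
        # SELU constants, cast to X's dtype so the function stays generic.
        alpha = op.CastLike(1.6732632423543772, X)
        gamma = op.CastLike(1.0507009873554805, X)
        zero = op.CastLike(0.0, X)
        neg = gamma * (alpha * op.Exp(X) - alpha)
        pos = gamma * X
        return op.Where(X <= zero, neg, pos)

    def custom_selu(g, X):
        # Route aten::selu through the onnxscript function above.
        return g.onnxscript_op(Selu, X).setType(X.type())

    torch.onnx.register_custom_op_symbolic(
        symbolic_name="aten::selu",
        symbolic_fn=custom_selu,
        opset_version=15,
    )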
|
D | test_onnxscript_runtime.py |
      24  model = torch.nn.SELU()
      31  def Selu(
      45  return g.onnxscript_op(Selu, X).setType(X.type())
      48  symbolic_name="aten::selu",
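
A hedged end-to-end sketch of what the runtime variant checks: export
torch.nn.SELU through a custom symbolic like the one registered above and
compare numerics with ONNX Runtime (assumed installed):

    import io

    import onnxruntime
    import torch

    model = torch.nn.SELU()
    x = torch.randn(1, 2, 3)

    buf = io.BytesIO()
    torch.onnx.export(model, (x,), buf, opset_version=15)

    sess = onnxruntime.InferenceSession(
        buf.getvalue(), providers=["CPUExecutionProvider"]
    )
    (out,) = sess.run(None, {sess.get_inputs()[0].name: x.numpy()})
    torch.testing.assert_close(torch.from_numpy(out), model(x))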
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_SeluGrad.pbtxt |
       7  The backpropagated gradients to the corresponding Selu operation.
      13  The outputs of the corresponding Selu operation.
      23  summary: "Computes gradients for the scaled exponential linear (Selu) operation."
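
For reference, the gradient this op computes follows directly from the SELU
definition; a NumPy sketch (names here are illustrative only):

    import numpy as np

    scale = 1.0507009873554805
    alpha = 1.6732632423543772

    def selu(x):
        return np.where(x > 0, scale * x, scale * alpha * (np.exp(x) - 1.0))

    def selu_grad(gradients, outputs):
        # For x > 0 the derivative is `scale`; for x <= 0 it is
        # scale * alpha * exp(x), which equals outputs + scale * alpha,
        # so the op only needs the forward outputs, not the inputs.
        return np.where(outputs > 0,
                        gradients * scale,
                        gradients * (outputs + scale * alpha))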
|
D | api_def_Selu.pbtxt | 2 graph_op_name: "Selu"
|
/external/tensorflow/tensorflow/core/kernels/mlir_generated/ |
D | gpu_op_selu.cc |
      21  GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Selu, DT_HALF);
      22  GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Selu, DT_FLOAT);
      23  GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Selu, DT_DOUBLE);
|
/external/tensorflow/tensorflow/core/api_def/python_api/ |
D | api_def_Selu.pbtxt |
       2  graph_op_name: "Selu"
       4  name: "nn.selu"
|
/external/tensorflow/tensorflow/core/api_def/java_api/ |
D | api_def_Selu.pbtxt |
       2  graph_op_name: "Selu"
       4  name: "nn.Selu"
|
/external/tensorflow/tensorflow/python/ops/ |
D | nn_grad_test.py |
     228  selu = gen_nn_ops.selu(inputs)
     229  selu_grad = gradients_impl.gradients(selu, inputs, grad_ys=dummy)[0]
     245  selu = gen_nn_ops.selu(inputs)
     246  selu_grad = gradients_impl.gradients(selu, inputs, grad_ys=dummy)[0]
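
The same gradient is reachable through the public API; a small sketch using
tf.GradientTape rather than the generated ops the test calls directly:

    import tensorflow as tf

    x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.nn.selu(x)
    dy_dx = tape.gradient(y, x)  # dispatches to the registered SeluGrad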
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/ |
D | Selu.pbtxt |
       2  name: "Selu"
      24  name: "Selu"
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/ |
D | Selu.pbtxt |
       2  name: "Selu"
      24  name: "Selu"
|
/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op_functor.h |
     174  struct Selu {
     175  // Computes Selu activation.
     198  // gradients: gradients backpropagated to the Selu op.
     199  // activations: outputs of the Selu op.
     200  // backprops: gradients to backpropagate to the Selu inputs.
|
D | relu_op.cc |
      75  Name("Selu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
      81  // Elu and Selu only make sense with float or double.
     104  void Selu<GPUDevice, T>::operator()( \
     107  extern template struct Selu<GPUDevice, T>;
     120  Name("Selu").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
|
D | unary_ops_composition.cc |
     171  // Register compute function for the Relu/Relu6/Elu/Selu.
     204  auto selu = functor::Selu<Eigen::DefaultDevice, T>(); \
     205  selu(Eigen::DefaultDevice(), in, *out); \
     255  REGISTER_COMPUTE_FN(Selu);
     320  REGISTER_COMPUTE_FN(Selu);
     384  REGISTER_COMPUTE_FN(Selu);
|
/external/tensorflow/tensorflow/python/keras/ |
D | activations.py |
     146  @keras_export('keras.activations.selu')
     148  def selu(x):
     149  """Scaled Exponential Linear Unit (SELU).
     151  The Scaled Exponential Linear Unit (SELU) activation function is defined as:
     159  Basically, the SELU activation function multiplies `scale` (> 1) with the
     175  ... activation='selu'))
     177  ... activation='selu'))
     179  ... activation='selu'))
     197  return nn.selu(x)
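
A short usage sketch in the spirit of this docstring; as the Keras docs note,
SELU is usually paired with the lecun_normal initializer so the
self-normalizing property holds:

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation='selu',
                              kernel_initializer='lecun_normal',
                              input_shape=(32,)),
        tf.keras.layers.Dense(64, activation='selu',
                              kernel_initializer='lecun_normal'),
        tf.keras.layers.Dense(10),
    ])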
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | elu_op.cc |
      35  XlaOp Selu(XlaOp x) {
      82  ctx->SetOutput(0, xla::Selu(ctx->Input(0)));
     107  REGISTER_XLA_OP(Name("Selu"), SeluOp);
|
D | elu_op.h | 23 XlaOp Selu(XlaOp x);
|
/external/pytorch/torch/nn/modules/ |
D | __init__.py |
      21  SELU,
     300  "SELU",
|
/external/pytorch/torch/_refs/nn/functional/ |
D | __init__.py |
      57  "selu",
     119  # alpha = - SELU.alpha * SELU.scale, here
     120  # SELU.alpha = 1.6732632423543772848170429916717 and
     121  # SELU.scale = 1.0507009873554804934193349852946
     400  @register_decomposition(aten.selu)
     407  def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
     409  Reference implementation of torch.nn.functional.selu
    1278  selu_ = _make_inplace(selu)
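
Using the constants quoted above, the reference decomposition amounts to the
following; a hedged sketch, not the file's literal body:

    import torch

    ALPHA = 1.6732632423543772848170429916717
    SCALE = 1.0507009873554804934193349852946

    def selu_ref(a: torch.Tensor) -> torch.Tensor:
        # scale * where(a > 0, a, alpha * (exp(a) - 1)), written with
        # expm1 for accuracy near zero.
        return SCALE * torch.where(a > 0, a, ALPHA * torch.expm1(a))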
|
/external/tensorflow/tensorflow/core/kernels/mlir_generated/op_definitions/ |
D | selu.mlir.tmpl | 3 %0 = "tf.Selu"(%arg0) : (tensor<*xelem_type>) -> tensor<*xoutput_type>
|
/external/pytorch/test/onnx/expect/ |
D | TestOperators.test_selu.expect | 9 op_type: "Selu"
|
/external/pytorch/functorch/dim/ |
D | op_properties.py |
     271  torch.selu,
     272  torch.nn.functional.selu,
|
/external/pytorch/torch/ao/pruning/_experimental/pruner/ |
D | base_structured_sparsifier.py |
      53  F.selu,
      80  nn.SELU,
|
/external/pytorch/torch/csrc/api/include/torch/nn/modules/ |
D | activation.h |
      49  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      51  /// Applies the selu function element-wise.
      52  /// See https://pytorch.org/docs/main/nn.html#torch.nn.SELU to learn
      60  /// SELU model(SELUOptions().inplace(true));
      70  /// Pretty prints the `SELU` module into the given `stream`.
      79  /// provides, and examples of how to use `SELU` with `torch::nn::SELUOptions`.
      82  TORCH_MODULE(SELU);
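
The Python-side module mirrors this C++ API; a minimal usage sketch:

    import torch

    m = torch.nn.SELU(inplace=False)  # inplace mirrors SELUOptions().inplace(...)
    out = m(torch.randn(4))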
|
/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/ |
D | relu_op_test.py |
     519  tf_selu = nn_ops.selu(np_features)
     537  nn_ops.selu, [x], delta=1.0 / 1024))
     545  *gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
     555  y = nn_ops.selu(x)
     573  y = nn_ops.selu(x)
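
The numeric check these tests run is also available through the public
tf.test API; a sketch using the v2 gradient checker:

    import tensorflow as tf

    x = tf.constant([[-1.5, -0.5, 0.5, 1.5]])
    # Returns (theoretical, numeric) Jacobians; the test asserts they agree.
    theoretical, numeric = tf.test.compute_gradient(
        tf.nn.selu, [x], delta=1.0 / 1024
    )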
|
/external/pytorch/torch/csrc/jit/passes/ |
D | restore_mutation.h | 28 {aten::selu, false},
|