
Searched full:log_softmax (Results 1 – 25 of 193) sorted by relevance


/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Softmax.cpp
37 bool log_softmax) { in add_softmax_node() argument
63 if (log_softmax) { in add_softmax_node()
110 graph, args[0], args[1], args[3], /* log_softmax = */ false); in softmax()
113 void log_softmax(ComputeGraph& graph, const std::vector<ValueRef>& args) { in log_softmax() function
116 graph, args[0], args[1], args[3], /* log_softmax = */ true); in log_softmax()
121 VK_REGISTER_OP(aten._log_softmax.default, log_softmax);
Softmax.h
24 bool log_softmax);
/external/ComputeLibrary/src/core/CL/cl_kernels/common/
softmax_layer.cl
78 #if defined(LOG_SOFTMAX)
81 #else // defined(LOG_SOFTMAX)
83 #endif // defined(LOG_SOFTMAX)
189 #ifdef LOG_SOFTMAX
194 #else /* LOG_SOFTMAX */
199 #endif /* LOG_SOFTMAX */
211 #ifdef LOG_SOFTMAX
215 #else /* LOG_SOFTMAX */
219 #endif /* LOG_SOFTMAX */
407 #ifdef LOG_SOFTMAX
[all …]
/external/tensorflow/tensorflow/lite/testing/op_tests/
log_softmax.py
15 """Test configs for log_softmax."""
24 """Make a set of tests to do log_softmax."""
32 """Build the log_softmax op testing graph."""
38 out = tf.nn.log_softmax(input_tensor)
/external/executorch/backends/arm/_passes/
decompose_softmaxes_pass.py
14 torch_softmax = (torch.ops.aten.softmax.int, torch.ops.aten.log_softmax.int)
22 log_softmax = (torch.ops.aten.log_softmax.int, exir_ops.edge.aten._log_softmax.default) variable
74 if op in log_softmax:
/external/pytorch/torch/csrc/jit/tensorexpr/operators/
softmax.cpp
11 bool log_softmax) { in computeSoftmax() argument
27 // log_softmax(vi) = log(softmax(vi)) in computeSoftmax()
31 // log_softmax(vi) = vi - max(vi) - log(sum(exp(vi - max(vi)))) in computeSoftmax()
39 // - Final loop computes the log_softmax for every element in v. in computeSoftmax()
129 if (!log_softmax) { in computeSoftmax()
softmax.h
13 bool log_softmax);
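The comments quoted in softmax.cpp above spell out the numerically stable identity these kernels rely on: log_softmax(v_i) = v_i - max(v) - log(sum_j exp(v_j - max(v))). A minimal standalone C++ sketch of that identity (illustrative only: the function name log_softmax_1d, the float/1-D layout, and the non-empty-input assumption are mine, not the tensorexpr lowering):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// log_softmax(v_i) = v_i - max(v) - log(sum_j exp(v_j - max(v)))
// Subtracting max(v) first keeps every exponent <= 0, so exp() cannot
// overflow. Assumes v is non-empty.
std::vector<float> log_softmax_1d(const std::vector<float>& v) {
  const float max_v = *std::max_element(v.begin(), v.end());
  float sum = 0.0f;
  for (const float x : v) {
    sum += std::exp(x - max_v);  // each term lies in (0, 1]
  }
  const float log_sum = std::log(sum);
  std::vector<float> out(v.size());
  for (std::size_t i = 0; i < v.size(); ++i) {
    out[i] = v[i] - max_v - log_sum;
  }
  return out;
}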
/external/pytorch/torch/masked/
__init__.py
15 log_softmax,
42 "log_softmax",
/external/executorch/backends/arm/test/ops/
test_logsoftmax.py
52 .check(["torch.ops.aten.log_softmax.int"])
73 .check_not(["torch.ops.aten.log_softmax.int"])
97 .check_not(["torch.ops.aten.log_softmax.int"])
/external/pytorch/torch/csrc/api/src/nn/modules/
adaptive.cpp
137 const Tensor cluster_logprob = F::log_softmax(cluster_output, 1); in forward()
159 const Tensor head_logprob = F::log_softmax(head_output, 1); in forward()
174 const Tensor head_logprob = F::log_softmax(head_output, 1); in _get_full_log_prob()
184 const Tensor cluster_logprob = F::log_softmax(cluster_output, 1); in _get_full_log_prob()
/external/executorch/kernels/portable/cpu/
op_log_softmax.cpp
53 // calculate max in log_softmax dim. During log_softmax in log_softmax_out()
/external/pytorch/torch/_refs/special/
__init__.py
37 "log_softmax",
206 def log_softmax( function
211 return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
/external/pytorch/aten/src/ATen/native/metal/ops/
MetalSoftmax.mm
21 // TODO: [T87180544] Implement softmax/log_softmax in metal shaders
66 m.impl(TORCH_SELECTIVE_NAME("aten::log_softmax.int"), TORCH_FN(metal::log_softmax_int));
/external/executorch/exir/tests/
test_op_convert.py
64 aten.log_softmax.int: aten.log_softmax.int_out,
/external/pytorch/torch/nn/modules/
adaptive.py
238 cluster_logprob = F.log_softmax(cluster_output, dim=1)
252 head_logprob = F.log_softmax(head_output, dim=1)
264 head_logprob = F.log_softmax(head_output, dim=1)
270 cluster_logprob = F.log_softmax(cluster_output, dim=1)
/external/pytorch/functorch/op_analysis/
public_api
421 log_softmax
607 masked.log_softmax
/external/tensorflow/tensorflow/dtensor/mlir/expansions/
softmax_spmd_expander.cc
159 auto log_softmax = builder.create<mlir::TF::SubOp>( in ComputeLogSoftmax() local
161 return log_softmax.getResult(); in ComputeLogSoftmax()
169 bool log_softmax) { in ComputeShardedSoftmax() argument
177 if (log_softmax) { in ComputeShardedSoftmax()
574 // softmax is 1 and log_softmax is 0. in ExpandOp()
579 const mlir::Value log_softmax = in ExpandOp() local
599 features_zero, log_softmax) in ExpandOp()
/external/executorch/backends/vulkan/runtime/graph/ops/glsl/
softmax.yaml
19 - NAME: log_softmax
/external/pytorch/test/onnx/model_defs/
mnist.py
21 return F.log_softmax(x, dim=1)
/external/tensorflow/tensorflow/python/ops/distributions/
categorical.py
317 nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
342 delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
343 nn_ops.log_softmax(b.logits))
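The hits above use log_softmax to form categorical entropy and KL divergence; written out (standard identities with z the logits and p = softmax(z), not text quoted from the file):

H(p) = -\sum_i p_i \log p_i = -\sum_i \mathrm{softmax}(z)_i \, \mathrm{log\_softmax}(z)_i

D_{\mathrm{KL}}(a \,\|\, b) = \sum_i p^{(a)}_i \left( \mathrm{log\_softmax}(z^{(a)})_i - \mathrm{log\_softmax}(z^{(b)})_i \right)

The second identity is exactly the delta_log_probs1 difference computed at lines 342-343 above.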
/external/pytorch/aten/src/ATen/native/
SoftMax.cpp
27 #include <ATen/ops/log_softmax.h>
499 Tensor log_softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) { in log_softmax() function
551 return at::log_softmax(input, dim, dtype); in special_log_softmax()
568 Tensor log_softmax(const Tensor& self, Dimname dim, std::optional<ScalarType> dtype) { in log_softmax() function
569 return at::log_softmax(self, dimname_to_position(self, dim), dtype); in log_softmax()
/external/armnn/src/armnnTfLiteParser/test/
LogSoftmax.cpp
18 "operator_codes": [ { "builtin_code": "LOG_SOFTMAX" } ], in LogSoftmaxFixture()
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Softmax.cpp
192 Tensor log_softmax( in log_softmax() function
211 m.impl("_log_softmax", TORCH_FN(log_softmax)); in TORCH_LIBRARY_IMPL()
/external/pytorch/docs/source/
special.rst
36 .. autofunction:: log_softmax
/external/pytorch/torch/csrc/api/include/torch/nn/functional/
activation.h
307 inline Tensor log_softmax( in log_softmax() function
314 ret = input.log_softmax(dim); in log_softmax()
316 ret = input.log_softmax(dim, dtype); in log_softmax()
325 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.log_softmax
334 /// F::log_softmax(input, LogSoftmaxFuncOptions(1));
336 inline Tensor log_softmax( in log_softmax() function
339 return detail::log_softmax(input, options.dim(), options.dtype()); in log_softmax()
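The docs in this header show the call pattern F::log_softmax(input, LogSoftmaxFuncOptions(1)). A minimal usage sketch built around that documented call (the main() scaffolding and tensor shape are mine):

#include <iostream>
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  // A 2x3 batch of logits; log-softmax runs along dim 1, mirroring the
  // LogSoftmaxFuncOptions(1) example from the header's documentation.
  const torch::Tensor input = torch::randn({2, 3});
  const torch::Tensor out = F::log_softmax(input, F::LogSoftmaxFuncOptions(1));
  // exp(out) recovers ordinary softmax, so each row of exp(out) sums to 1.
  std::cout << out << '\n';
}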
