
Searched full:softmax (Results 1 – 25 of 734) sorted by relevance


/external/tensorflow/tensorflow/core/kernels/
softmax_op_functor.h:29 // Computes Softmax or LogSoftmax activation.
32 // softmax: dims: batch_size, num_classes.
35 typename TTypes<T>::Matrix softmax, const bool log);
45 typename TTypes<T>::Matrix softmax, const bool log) { in Compute()
66 // Calculate the log of the softmax in Compute()
67 // softmax = logits - max(logits along classes); in Compute()
68 softmax.device(d) = shifted_logits; in Compute()
69 // softmax = softmax - log(sum(exp(softmax along classes))); in Compute()
70 softmax.device(d) = (softmax - softmax.exp() in Compute()
80 // softmax = exp(logits - max(logits along classes)); in Compute()
[all …]
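The comments above describe the max-subtraction trick used in softmax_op_functor.h for both softmax and log-softmax. A minimal NumPy sketch of the same numerically stable formulation (illustrative only, not the TensorFlow code; the function name is mine):

    import numpy as np

    def stable_softmax(logits, log=False):
        # Shift by the per-row max so exp() cannot overflow.
        shifted = logits - logits.max(axis=-1, keepdims=True)
        if log:
            # log_softmax = shifted - log(sum(exp(shifted)))
            return shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
        exps = np.exp(shifted)
        return exps / exps.sum(axis=-1, keepdims=True)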
/external/pytorch/torch/csrc/jit/tensorexpr/operators/
softmax.cpp:1 #include <torch/csrc/jit/tensorexpr/operators/softmax.h>
12 // Softmax is computed as follows: in computeSoftmax()
13 // softmax(vi) = exp(vi) / sum(exp(vi)) in computeSoftmax()
17 // softmax(vi) = exp(vi - max(vi)) / sum(exp(vi - max(vi))) in computeSoftmax()
20 // - First loop computes the max over the softmax dim. in computeSoftmax()
22 // the max of the softmax dim it belongs to. in computeSoftmax()
23 // - Third loop computes the sum over the softmax dim. in computeSoftmax()
24 // - Final loop computes softmax for every element in v. in computeSoftmax()
27 // log_softmax(vi) = log(softmax(vi)) in computeSoftmax()
34 // - First loop computes the max over the softmax dim. in computeSoftmax()
[all …]
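The loop structure sketched in those comments (max, exp of shifted values, sum, normalize) can be written out explicitly. A hedged, purely illustrative Python version of that four-pass decomposition over the softmax dim:

    import math

    def softmax_four_pass(v):
        """Illustrative four-pass softmax over a 1-D list, mirroring the
        loop decomposition described in computeSoftmax()."""
        m = max(v)                        # pass 1: max over the softmax dim
        e = [math.exp(x - m) for x in v]  # pass 2: exp(x - max) per element
        s = sum(e)                        # pass 3: sum over the softmax dim
        return [x / s for x in e]         # pass 4: normalize every element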
/external/executorch/backends/xnnpack/test/ops/
softmax.py:14 class Softmax(torch.nn.Module): class in TestSoftmax
20 return torch.nn.Softmax(dim=self.dim)(x)
24 # as xnnpack only supports softmax on the last dimension.
29 Tester(self.Softmax(dim), inputs)
31 .check_count({"torch.ops.aten.softmax": 1})
52 # as xnnpack only supports softmax on the last dimension.
53 # This test validates the delegate does not attempt to delegate softmax
59 Tester(self.Softmax(dim), inputs)
61 .check_count({"torch.ops.aten.softmax": 1})
/external/executorch/backends/arm/test/ops/
test_softmax.py:33 """Tests softmax."""
35 class Softmax(torch.nn.Module): class in TestSoftmax
38 self.softmax = torch.nn.Softmax(dim=dim)
41 return self.softmax(x)
53 .check(["torch.ops.aten.softmax.int"])
74 .check_not(["torch.ops.aten.softmax.int"])
98 .check_not(["torch.ops.aten.softmax.int"])
128 self._test_softmax_tosa_MI_pipeline(self.Softmax(dim=dim), (test_data,))
137 self._test_softmax_tosa_BI_pipeline(self.Softmax(dim=dim), (test_data,))
146 self._test_softmax_tosa_u55_BI_pipeline(self.Softmax(dim=dim), (test_data,))
[all …]
/external/ComputeLibrary/arm_compute/runtime/CL/functions/
CLSoftmaxLayer.h:41 * Softmax is calculated by :
44 * Log Softmax is calculated by :
74 …ensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
78 … * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
84 …ensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
88 … * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
93 …ensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
97 … * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
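To make the axis semantics in the CLSoftmaxLayer documentation concrete: for a 4x5x6 input and axis=1, softmax runs over 4x6 = 24 independent vectors of length 5. A small NumPy check written with NumPy's axis convention (illustrative; ACL's own dimension ordering may differ):

    import numpy as np

    x = np.random.randn(4, 5, 6)
    y = np.exp(x - x.max(axis=1, keepdims=True))
    y /= y.sum(axis=1, keepdims=True)
    # Each of the 4*6 = 24 length-5 slices along axis 1 sums to 1.
    assert np.allclose(y.sum(axis=1), np.ones((4, 6)))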
/external/tensorflow/tensorflow/core/kernels/sparse/
kernels_gpu.cu.cc:348 T* softmax) { in CalculateRowSoftmax() argument
350 // softmax[row] = exp(shifted_logits[row]) / sum(exp(shifted_logits[row])) in CalculateRowSoftmax()
361 softmax[r_i] = exp_i; in CalculateRowSoftmax()
365 softmax[r_i] = softmax[r_i] / sum_exp; in CalculateRowSoftmax()
372 const T* logits, T* softmax) { in CSRSparseMatrixSoftmaxKernel2D() argument
379 softmax); in CSRSparseMatrixSoftmaxKernel2D()
397 const int* row_ptr, const T* logits, T* softmax) { in CSRSparseMatrixSoftmaxKernel3D() argument
414 softmax); in CSRSparseMatrixSoftmaxKernel3D()
481 const T* softmax, const int grad_softmax_begin, const int grad_softmax_end, in CalculateRowSoftmaxGrad() argument
490 // looking for matching indices. In the softmax indices only, perform: in CalculateRowSoftmaxGrad()
[all …]
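The CSR kernel above computes a softmax per row of the sparse matrix, over the stored logits only. A rough Python sketch of that per-row computation using a CSR row-pointer array (illustrative; not the CUDA kernel):

    import numpy as np

    def csr_row_softmax(row_ptr, logits):
        """Per-row softmax over a CSR matrix's stored values, as in
        CalculateRowSoftmax: softmax[row] = exp(shifted) / sum(exp(shifted))."""
        out = np.empty_like(logits, dtype=np.float64)
        for r in range(len(row_ptr) - 1):
            begin, end = row_ptr[r], row_ptr[r + 1]
            if begin == end:              # empty row: nothing to normalize
                continue
            row = logits[begin:end]
            shifted = row - row.max()     # stabilize before exp()
            exps = np.exp(shifted)
            out[begin:end] = exps / exps.sum()
        return out

    # Example: a 2-row CSR matrix with 2 and 3 stored values.
    print(csr_row_softmax(np.array([0, 2, 5]),
                          np.array([1.0, 2.0, 0.5, 0.5, 0.5])))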
softmax_op.cc:16 // Implements the kernel for the CSRSoftmax op, which performs softmax
76 functor::CSRSparseMatrixSoftmax<Device, T> softmax; in Compute() local
78 ctx, softmax(ctx, *logits_matrix, output_matrix.values().vec<T>())); in Compute()
125 "dtype of softmax is not equal to 'type': ", in Compute()
140 "Ranks of softmax and grad_softmax matrices differ: ", in Compute()
146 "Ranks of softmax and grad_softmax matrices differ: ", in Compute()
159 "Shapes of softmax and grad_softmax matrices differ: ", in Compute()
164 // Allocate output shapes. Note that since the Softmax Gradient in Compute()
166 // softmax value, it will keep the sparsity structure of the softmax. in Compute()
211 OpKernelContext* ctx, const CSRSparseMatrix& softmax, \
/external/tensorflow/tensorflow/compiler/mlir/tosa/transforms/
passes.td:63 def TosaDequantizeTFLSoftmaxPass : Pass<"tosa-dequantize-tfl-softmax", "mlir::func::FuncOp"> {
64 let summary = "Dequantize TFLite Softmax ops.";
66 This pass rewrites quantized TFLite Softmax ops as: Dequantize, (float) Softmax, Quantize.
67 It is a work around for current performance issues with quantized Softmax codegen.
68 For instance it is a 20% end-to-end speedup on certain Softmax-heavy BERTs.
70 Softmax lowering. But as Softmax isn't currently a TOSA op, this isn't a TOSA
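The pass description says quantized Softmax is rewritten as Dequantize, float Softmax, Quantize. A hedged scalar-quantization sketch of that rewrite in plain Python (the scale/zero-point parameters and int8 clamping range are illustrative assumptions, not the pass's actual attributes):

    import numpy as np

    def dequantized_softmax(q_in, in_scale, in_zp, out_scale, out_zp):
        """Emulate the rewrite: Dequantize -> float Softmax -> Quantize."""
        x = (q_in.astype(np.float32) - in_zp) * in_scale        # Dequantize
        e = np.exp(x - x.max(axis=-1, keepdims=True))            # float Softmax
        p = e / e.sum(axis=-1, keepdims=True)
        q = np.round(p / out_scale) + out_zp                     # Quantize
        return np.clip(q, -128, 127).astype(np.int8)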
/external/tensorflow/tensorflow/compiler/mlir/tfrt/benchmarks/
softmax_op_benchmark.cc:27 %result = "tf.Softmax"(%input)
34 std::string Softmax(llvm::ArrayRef<bool> dynamic_dims, in Softmax() function
47 OutT softmax) { in ComputeSoftmax() argument
66 softmax.device(d) = shifted_logits.exp(); in ComputeSoftmax()
67 softmax.device(d) = (softmax * softmax.sum(along_class) in ComputeSoftmax()
95 BM(JitrtV(NAME, Softmax({DYNAMIC_ROW, DYNAMIC_COL}, {ROWS, COLS}), "main", \
98 BM(Tfrt(NAME, Softmax({DYNAMIC_ROW, DYNAMIC_COL}, {ROWS, COLS}), "main", \
/external/tensorflow/tensorflow/python/keras/
activations.py:27 # In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
28 # layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
35 'softmax_v2': 'softmax',
39 @keras_export('keras.activations.softmax')
41 def softmax(x, axis=-1): function
42 """Softmax converts a vector of values to a probability distribution.
49 Softmax is often used as the activation for the last
53 The softmax of each vector x is computed as
60 axis: Integer, axis along which the softmax normalization is applied.
63 Tensor, output of softmax transformation (all values are non-negative
[all …]
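A short usage example for the Keras activation documented above (assuming TF 2.x; each row of the result sums to 1):

    import tensorflow as tf

    logits = tf.constant([[1.0, 2.0, 3.0],
                          [1.0, 1.0, 1.0]])
    probs = tf.keras.activations.softmax(logits, axis=-1)
    # Each row is a probability distribution.
    print(probs.numpy().sum(axis=-1))  # [1. 1.]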
/external/pytorch/benchmarks/operator_benchmark/pt/
softmax_test.py:8 Microbenchmarks for the softmax operators.
12 # Configs for softmax ops
39 ["Softmax", nn.Softmax],
48 ["Softmax", nn.Softmax],
/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Softmax.cpp:40 "Vulkan softmax only supports texture storage"); in add_softmax_node()
53 "Softmax shader currently does not support concat dim == reduce dim"); in add_softmax_node()
56 "Softmax shader currently does not support concat dim == reduce dim"); in add_softmax_node()
60 std::string kernel_name = "softmax"; in add_softmax_node()
67 // This should match the value of MAX_NTHREADS in the softmax shader. in add_softmax_node()
107 void softmax(ComputeGraph& graph, const std::vector<ValueRef>& args) { in softmax() function
120 VK_REGISTER_OP(aten._softmax.default, softmax);
/external/libtextclassifier/native/lang_id/common/math/
softmax.cc:17 #include "lang_id/common/math/softmax.h"
35 // Standard softmax formula for label's probability is in ComputeSoftmaxProbability()
76 std::vector<float> softmax; in ComputeSoftmax() local
77 softmax.reserve(scores.size()); in ComputeSoftmax()
79 return softmax; in ComputeSoftmax()
98 softmax.push_back(exp_scores[i] / denominator); in ComputeSoftmax()
100 return softmax; in ComputeSoftmax()
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
Softmax.pbtxt:2 name: "Softmax"
8 name: "softmax"
24 name: "Softmax"
30 name: "softmax"
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
Softmax.pbtxt:2 name: "Softmax"
8 name: "softmax"
24 name: "Softmax"
30 name: "softmax"
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_Softmax.pbtxt:2 graph_op_name: "Softmax"
10 name: "softmax"
15 summary: "Computes softmax activations."
19 $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
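A quick numeric check of that formula for one batch row (illustrative only):

    import math

    logits = [0.0, 1.0, 2.0]
    denom = sum(math.exp(l) for l in logits)
    softmax = [math.exp(l) / denom for l in logits]
    # approx [0.090, 0.245, 0.665]; the values sum to 1.
    print(softmax, sum(softmax))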
/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_softmax_op.cc:61 // Softmax forward execute
105 // Softmax primitive.
121 // Softmax forward primitive setup
123 // Create memory descriptors for softmax data with specified format. in Setup()
128 // Create softmax descriptor and primitive descriptor. in Setup()
140 // Create softmax primitive and add it to net in Setup()
159 // Get a softmax fwd primitive from the cached pool. in Get()
239 // In MKL, data format passed to mkl softmax op depends on dimension of in Compute()
247 // dimension to do softmax. in Compute()
284 // Get a softmax fwd primitive from primitive pool. in Compute()
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/
softmax_quantized_test.cc:45 // float Softmax. in RunSoftmaxFloatReference()
53 optimized_ops::Softmax(sm_params, shape_common, reference_dequant_data.data(), in RunSoftmaxFloatReference()
55 // Work with quantized scaling for Softmax, under which 256 represents 1, but in RunSoftmaxFloatReference()
104 // Runs the Softmax and compares against the float reference implementation and
138 optimized_ops::Softmax(params, shape_common, input_data, shape_common, in RunOneSoftmaxTest()
140 reference_ops::Softmax(params, shape_common, input_data, shape_common, in RunOneSoftmaxTest()
167 // This function picks some random Softmax params, which are checked for
169 // it runs the Softmax test and returns true. This allows the caller
176 // Softmax, the width and height really just create test repetitions. in TryOneUniformSoftmax()
202 // Softmax may adapt as they traverse the depth, and so we test handling of
[all …]
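As the test comment notes, the quantized Softmax output uses a scaling in which 256 represents 1, i.e. an output scale of 1/256. A hedged sketch of comparing a uint8 quantized output against a float reference under that scale (the tolerance of one quantized step is an illustrative choice, not the test's exact criterion):

    import numpy as np

    def compare_quantized_softmax(quantized_u8, float_reference, tolerance=1.0):
        """Dequantize with scale 1/256 (256 represents 1.0) and compare,
        measured in units of one quantized step."""
        dequantized = quantized_u8.astype(np.float32) / 256.0
        diff_in_steps = np.abs(dequantized - float_reference) * 256.0
        return np.all(diff_in_steps <= tolerance)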
/external/ComputeLibrary/src/core/helpers/
SoftmaxHelpers.h:33 /** Given a softmax axis, this function returns the permutation vector required to put the axis to …
37 * Axis selects the dimension on which softmax is performed.
38 * E.g. For input of shape 4x5x6 and axis=1, softmax will be applied to 4x6=24 vectors of size 5.
39 …* Interally softmax kernels is always performed on the first dimension (front dimension), therefor…
42 …* @param[in] axis Axis on which to perform softmax. Supported: 1, 2, 3 (0 implies no permutation n…
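The helper above produces a permutation that moves the softmax axis to the front so the kernel can always reduce over the first dimension. A hedged Python sketch of one such permutation (not ACL's implementation; ACL's dimension ordering and supported axes may differ):

    def axis_to_front_permutation(axis, num_dims):
        """Permutation that moves `axis` to position 0 and keeps the
        relative order of the remaining dimensions."""
        return [axis] + [d for d in range(num_dims) if d != axis]

    # Example: for a 3-D tensor and axis=1, the permutation is [1, 0, 2].
    print(axis_to_front_permutation(1, 3))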
/external/libtextclassifier/native/lang_id/common/
embedding-network-params.h:143 // Returns true if a softmax layer exists.
148 // Returns weight matrix for the softmax layer. Note: should be called only
153 SAFTM_CHECK(HasSoftmax()) << "No softmax layer."; in GetSoftmaxMatrix()
164 // Returns bias for the softmax layer. Technically a Matrix, but we expect it
168 SAFTM_CHECK(HasSoftmax()) << "No softmax layer."; in GetSoftmaxBias()
255 // ** Access methods for optional MatrixParams softmax.
257 // Returns 1 if proto has optional field softmax, 0 otherwise.
260 // Returns number of rows of transpose(proto.softmax()).
263 // Returns number of columns of transpose(proto.softmax()).
266 // Returns quantization mode for the softmax weights.
[all …]
/external/armnn/src/backends/backendsCommon/test/
JsonPrinterTestImpl.cpp:142 IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax"); in GetSoftmaxProfilerJson() local
145 input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0)); in GetSoftmaxProfilerJson()
146 softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0)); in GetSoftmaxProfilerJson()
157 softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); in GetSoftmaxProfilerJson()
176 // one of inputs is sufficiently larger than the others to saturate softmax in GetSoftmaxProfilerJson()
269 …bool softmaxCheck = ((result.find("softmax") != std::string::npos) || // Validate softm… in RunSoftmaxProfilerJsonPrinterTest()
270 (result.find("Softmax") != std::string::npos) || in RunSoftmaxProfilerJsonPrinterTest()
271 (result.find("SoftMax") != std::string::npos)); in RunSoftmaxProfilerJsonPrinterTest()
/external/tensorflow/tensorflow/python/ops/
nn_grad.py:280 @ops.RegisterGradient("Softmax")
282 """The derivative of the softmax nonlinearity.
285 The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
289 grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
292 op: the Softmax op.
293 grad_softmax: the tensor representing the gradient w.r.t. the softmax
297 gradient w.r.t the input to the softmax
300 softmax = op.outputs[0]
301 sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
302 return (grad_softmax - sum_channels) * softmax
[all …]
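The gradient formula quoted from nn_grad.py, grad_x = (grad_softmax - sum(grad_softmax * softmax)) * softmax, can be checked numerically. A small NumPy sketch with a finite-difference comparison (illustrative, not the TensorFlow code):

    import numpy as np

    def softmax(x):
        e = np.exp(x - x.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)

    def softmax_grad(grad_softmax, softmax_out):
        # grad_x = (grad_softmax - sum(grad_softmax * softmax)) * softmax
        sum_channels = np.sum(grad_softmax * softmax_out, axis=-1, keepdims=True)
        return (grad_softmax - sum_channels) * softmax_out

    # Finite-difference check on one row.
    x = np.array([0.1, -0.5, 2.0])
    g = np.array([0.3, 0.7, -0.2])           # upstream gradient
    analytic = softmax_grad(g, softmax(x))
    eps = 1e-6
    numeric = np.array([
        (np.dot(g, softmax(x + eps * np.eye(3)[i])) -
         np.dot(g, softmax(x - eps * np.eye(3)[i]))) / (2 * eps)
        for i in range(3)
    ])
    assert np.allclose(analytic, numeric, atol=1e-5)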
/external/libtextclassifier/native/utils/math/
softmax.cc:17 #include "utils/math/softmax.h"
33 // Standard softmax formula for label's probability is in ComputeSoftmaxProbability()
77 std::vector<float> softmax; in ComputeSoftmax() local
80 softmax.reserve(scores_size); in ComputeSoftmax()
99 softmax.push_back(exp_scores[i] / denominator); in ComputeSoftmax()
101 return softmax; in ComputeSoftmax()
softmax.h:24 // Computes probability of a softmax label. Parameter "scores" is the vector of
25 // softmax logits. Returns 0.0f if "label" is outside the range [0,
29 // Computes and returns a softmax for a given vector of floats. Parameter
30 // "scores" is the vector of softmax logits.
/external/pytorch/test/inductor/
test_fused_attention.py:117 .softmax(dim=-1)
145 .softmax(dim=-1)
255 .softmax(dim=-1)
276 .softmax(dim=-1)
289 torch.matmul(query, key.transpose(-2, -1)).div(3.0).softmax(dim=-1),
309 torch.matmul(query, key.transpose(-2, -1)).mul(0.4).softmax(dim=-1),
328 attn_weight = torch.softmax(
339 attn_weight = torch.softmax(
359 attn_weight = torch.softmax(
379 attn_weight = torch.softmax(div, dim=-1)
[all …]
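The patterns matched above are the unfused scaled-dot-product-attention form, where softmax is applied to the scaled QK^T scores. A minimal PyTorch sketch of that unfused form (illustrative; the shapes and the absence of masking and dropout are assumptions):

    import math
    import torch

    def unfused_attention(query, key, value):
        # scores shape: (..., seq_q, seq_k)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query.size(-1))
        attn_weight = torch.softmax(scores, dim=-1)
        return torch.matmul(attn_weight, value)

    q = torch.randn(2, 4, 8, 16)   # (batch, heads, seq, head_dim)
    k = torch.randn(2, 4, 8, 16)
    v = torch.randn(2, 4, 8, 16)
    out = unfused_attention(q, k, v)   # (2, 4, 8, 16)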
