/external/tensorflow/tensorflow/core/kernels/ |
D | softmax_op_functor.h |
     35  typename TTypes<T>::Matrix softmax, const bool log);
     45  typename TTypes<T>::Matrix softmax, const bool log) {  in Compute()
     73  softmax.device(d) = shifted_logits;  in Compute()
     75  softmax.device(d) = (softmax - softmax.exp()  in Compute()
     86  softmax.device(d) = shifted_logits.exp();  in Compute()
     88  softmax.device(d) = (softmax * softmax.sum(along_class)  in Compute()
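The Eigen functor above follows the usual numerically stable pattern: shift the logits by the per-row maximum, exponentiate, and normalize along the class dimension (or subtract the log of the sum for log-softmax). A minimal NumPy sketch of that pattern, as an illustration rather than the actual Eigen code:

    import numpy as np

    def stable_softmax(logits, log=False):
        # Row-wise (log-)softmax using the max-subtraction trick for stability.
        shifted = logits - np.max(logits, axis=-1, keepdims=True)   # shifted_logits
        if log:
            # log-softmax: shifted - log(sum(exp(shifted)))
            return shifted - np.log(np.sum(np.exp(shifted), axis=-1, keepdims=True))
        e = np.exp(shifted)
        return e / np.sum(e, axis=-1, keepdims=True)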
|
D | softmax_op.cc |
     39  typename TTypes<T>::Matrix softmax, const bool log) {  in operator ()()
     40  SoftmaxEigenImpl<Device, T>::Compute(d, logits, softmax, log);  in operator ()()
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.keras.applications.pbtxt |
     77  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     81  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     85  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     89  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     93  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     97  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    101  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    105  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    109  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    113  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
         [all …]
|
D | tensorflow.keras.applications.efficientnet.pbtxt |
      5  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
      9  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     13  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     17  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     21  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     25  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     29  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     33  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
|
D | tensorflow.keras.applications.resnet_v2.pbtxt |
      5  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
      9  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     13  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.keras.applications.pbtxt |
     77  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     81  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     85  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     89  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     93  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     97  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    101  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    105  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    109  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
    113  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
         [all …]
|
D | tensorflow.keras.applications.efficientnet.pbtxt |
      5  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
      9  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     13  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     17  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     21  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     25  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     29  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     33  …s=kwargs, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
|
D | tensorflow.keras.applications.resnet_v2.pbtxt |
      5  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
      9  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
     13  …rds=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\', \'softmax\'], "
|
/external/libtextclassifier/native/lang_id/common/math/ |
D | softmax.cc |
     75  std::vector<float> softmax;  in ComputeSoftmax() local
     76  softmax.reserve(scores.size());  in ComputeSoftmax()
     78  return softmax;  in ComputeSoftmax()
     97  softmax.push_back(exp_scores[i] / denominator);  in ComputeSoftmax()
     99  return softmax;  in ComputeSoftmax()
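The visible lines sketch the shape of ComputeSoftmax: reserve space, exponentiate the scores, and divide each by their common sum. A hedged Python equivalent (names are illustrative; the real C++ code may also shift or clamp scores before exponentiating):

    import math

    def compute_softmax(scores):
        # Mirror of the ComputeSoftmax shape above: exponentiate, then normalize.
        if not scores:
            return []                                  # nothing to normalize
        exp_scores = [math.exp(s) for s in scores]
        denominator = sum(exp_scores)
        return [e / denominator for e in exp_scores]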
|
/external/tensorflow/tensorflow/core/kernels/sparse/ |
D | kernels_gpu.cu.cc |
    348  T* softmax) {  in CalculateRowSoftmax() argument
    361  softmax[r_i] = exp_i;  in CalculateRowSoftmax()
    365  softmax[r_i] = softmax[r_i] / sum_exp;  in CalculateRowSoftmax()
    372  const T* logits, T* softmax) {  in CSRSparseMatrixSoftmaxKernel2D() argument
    379  softmax);  in CSRSparseMatrixSoftmaxKernel2D()
    397  const int* row_ptr, const T* logits, T* softmax) {  in CSRSparseMatrixSoftmaxKernel3D() argument
    414  softmax);  in CSRSparseMatrixSoftmaxKernel3D()
    481  const T* softmax, const int grad_softmax_begin, const int grad_softmax_end,  in CalculateRowSoftmaxGrad() argument
    501  sum_prod += ldg(softmax + i) * ldg(grad_softmax + j);  in CalculateRowSoftmaxGrad()
    528  gradient[i] = (ldg(grad_softmax + j) - sum_prod) * ldg(softmax + i);  in CalculateRowSoftmaxGrad()
         [all …]
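CalculateRowSoftmax normalizes one CSR row at a time: it writes the exponentials into `softmax`, accumulates their sum, then divides each entry by that sum (CalculateRowSoftmaxGrad applies the matching vector-Jacobian product). A rough CPU-side Python sketch of the per-row forward pass; the exact shifting and guarding in the GPU kernel may differ:

    import numpy as np

    def csr_row_softmax(values, row_ptr):
        # Softmax applied independently to each CSR row's stored values.
        out = np.empty_like(values)
        for r in range(len(row_ptr) - 1):
            begin, end = row_ptr[r], row_ptr[r + 1]
            row = values[begin:end]
            if row.size == 0:
                continue                       # empty row: nothing to write
            e = np.exp(row - row.max())        # shift by the row max for stability
            out[begin:end] = e / e.sum()       # divide by sum_exp, as in the kernel
        return out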
|
D | softmax_op.cc |
     76  functor::CSRSparseMatrixSoftmax<Device, T> softmax;  in Compute() local
     78  ctx, softmax(ctx, *logits_matrix, output_matrix.values().vec<T>()));  in Compute()
    212  OpKernelContext* ctx, const CSRSparseMatrix& softmax, \
|
/external/libtextclassifier/native/utils/math/ |
D | softmax.cc |
     77  std::vector<float> softmax;  in ComputeSoftmax() local
     80  softmax.reserve(scores_size);  in ComputeSoftmax()
     99  softmax.push_back(exp_scores[i] / denominator);  in ComputeSoftmax()
    101  return softmax;  in ComputeSoftmax()
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_Softmax.pbtxt |
     10  name: "softmax"
     15  summary: "Computes softmax activations."
     19  $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
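A quick numeric check of the formula quoted at line 19, on a single row of logits (values rounded):

    import numpy as np

    logits = np.array([[1.0, 2.0, 3.0]])
    e = np.exp(logits)
    print(e / e.sum(axis=-1, keepdims=True))   # ~[[0.0900, 0.2447, 0.6652]]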
|
D | api_def_SparseMatrixSoftmax.pbtxt |
      9  name: "softmax"
     12  summary: "Calculates the softmax of a CSRSparseMatrix."
     14  Calculate the softmax of the innermost dimensions of a SparseMatrix.
|
D | api_def_SparseMatrixSoftmaxGrad.pbtxt |
      5  name: "softmax"
     10  description: "The gradient of `softmax`."
|
D | api_def_SparseSoftmax.pbtxt |
     28  summary: "Applies softmax to a batched N-D `SparseTensor`."
     33  This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
     38  (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
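As the summary says, the op behaves like `tf.nn.softmax()` applied per innermost submatrix, but only the explicitly stored values participate; implicitly-zero elements stay implicit. A hedged sketch of that semantics for a 2-D `SparseTensor` (a simplification, not the registered kernel):

    import numpy as np

    def sparse_row_softmax(indices, values):
        # Per-row softmax over explicitly stored values only;
        # implicit zeros are left untouched.
        out = np.empty_like(values)
        rows = indices[:, 0]
        for r in np.unique(rows):
            mask = rows == r
            row = values[mask]
            e = np.exp(row - row.max())
            out[mask] = e / e.sum()
        return out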
|
D | api_def_StopGradient.pbtxt |
     15  to pretend that the value was a constant. For example, the softmax function
     20  def softmax(x):
     39  However, when we backprop through the softmax to x, we don't want to backprop
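The doc's point is that the maximum subtracted from the logits exists only for numerical stability, so its contribution can be excluded from the gradient with `tf.stop_gradient`. A sketch along those lines (the per-axis reduction here is an assumption; the doc's own example is 1-D):

    import tensorflow as tf

    def stable_softmax(x):
        # Subtract the max purely for numerical stability and treat it as a
        # constant during backprop, as the StopGradient doc describes.
        z = x - tf.stop_gradient(tf.reduce_max(x, axis=-1, keepdims=True))
        numerator = tf.exp(z)
        denominator = tf.reduce_sum(numerator, axis=-1, keepdims=True)
        return numerator / denominator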
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | softmax_op_test.py |
     50  softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
     52  res = np.log(softmax)
     54  res = softmax
     77  tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name)
    240  op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
    249  self.assertAllEqual(y, self.evaluate(nn_ops.softmax(x, axis=0)))
    257  nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
    264  nn_ops.softmax(ones, axis=2).eval()
    276  y = nn_ops.softmax(x)
|
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/ |
D | post-quantize.mlir |
     39  …%5 = "tfl.softmax"(%4) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform<u8:f32, 0.023…
     58  // CHECK-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor…
     59  // CHECK-NEXT: return %[[softmax]] : tensor<1x401408x!quant.uniform<u8:f32, 3.906250e-03>>
     71  // CHECK-NEXT: %[[softmax:.*]] = "tfl.softmax"(%arg0) {beta = 1.000000e+00 : f32} : (tensor<128x16…
     72  %0 = "tfl.softmax"(%arg0) {beta = 1.000000e+00 : f32} : (tensor<128x16xf32>) -> tensor<128x16xf32>
     75  // CHECK-NEXT: %[[argmax:.*]] = "tfl.arg_max"(%[[softmax]], %[[cst]]) : (tensor<128x16xf32>, tenso…
     77  // CHECK-NEXT: return %[[softmax]], %[[argmax]] : tensor<128x16xf32>, tensor<128xi32>
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/ |
D | Softmax.pbtxt |
      8  name: "softmax"
     30  name: "softmax"
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/ |
D | Softmax.pbtxt |
      8  name: "softmax"
     30  name: "softmax"
|
/external/tensorflow/tensorflow/lite/toco/tflite/ |
D | op_version_test.cc |
     46  std::unique_ptr<SoftmaxOperator> softmax(new SoftmaxOperator());  in TEST() local
     49  softmax->inputs.push_back(softmax_input);  in TEST()
     50  softmax->outputs.push_back(softmax_output);  in TEST()
     54  model.operators.push_back(std::move(softmax));  in TEST()
|
/external/tensorflow/tensorflow/core/ops/ |
D | sparse_csr_matrix_ops.cc |
    532  ShapeHandle softmax = sparse_matrix_shape_and_type.shape;  in __anon59a9d6c30e02() local
    533  TF_RETURN_IF_ERROR(c->WithRankAtLeast(softmax, 2, &softmax));  in __anon59a9d6c30e02()
    534  TF_RETURN_IF_ERROR(c->WithRankAtMost(softmax, 3, &softmax));  in __anon59a9d6c30e02()
    535  if (!c->RankKnown(softmax)) {  in __anon59a9d6c30e02()
    545  TF_RETURN_IF_ERROR(c->Merge(softmax, grad_softmax, &softmax));  in __anon59a9d6c30e02()
    547  0, {ShapeAndType{softmax, sparse_matrix_shape_and_type.dtype}});  in __anon59a9d6c30e02()
|
/external/tensorflow/tensorflow/python/ops/ |
D | nn_grad.py |
    304  softmax = op.outputs[0]
    305  sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
    306  return (grad_softmax - sum_channels) * softmax
    323  softmax = math_ops.exp(op.outputs[0])
    324  return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax
    532  softmax = nn_ops.softmax(logits)
    537  array_ops.expand_dims(softmax, 2)),
    538  axis=1)) * softmax)
    558  softmax = nn_ops.softmax(logits)
    563  array_ops.expand_dims(softmax, 2)),
         [all …]
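Lines 304-306 and 323-324 spell out the softmax and log-softmax gradients. The same vector-Jacobian products in plain NumPy, for reference (a sketch mirroring the listed formulas, not the registered gradient functions):

    import numpy as np

    def softmax_vjp(softmax, grad_softmax):
        # dL/dlogits = (g - sum(g * s, axis=-1, keepdims=True)) * s, with s the softmax output.
        sum_channels = np.sum(grad_softmax * softmax, axis=-1, keepdims=True)
        return (grad_softmax - sum_channels) * softmax

    def log_softmax_vjp(log_softmax, grad):
        # dL/dlogits = g - sum(g, axis=-1, keepdims=True) * exp(log_softmax).
        softmax = np.exp(log_softmax)
        return grad - np.sum(grad, axis=-1, keepdims=True) * softmax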
|
/external/libtextclassifier/native/lang_id/common/flatbuffers/ |
D | embedding-network.fbs |
     84  // hidden layer or the final (output / softmax) layer.
     93  // is generally used for softmax classification. That's why we say that the
     94  // last layer is the "softmax layer".
    113  // Hidden layers, followed by the final (softmax) layer.
|