/external/tensorflow/tensorflow/python/ops/distributions/ |
D | categorical.py |
      317  nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
      342  delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
      343  nn_ops.log_softmax(b.logits))
|
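The categorical.py hits sit inside the distribution's entropy and KL-divergence methods. A minimal sketch of the same computations with the public tf.nn API (the internal nn_ops module is swapped for tf.nn; the surrounding method bodies are inferred from the snippets, not quoted):

    import tensorflow as tf

    logits_a = tf.constant([[2.0, 1.0, 0.1]])
    logits_b = tf.constant([[0.5, 0.5, 0.5]])
    probs_a = tf.nn.softmax(logits_a)

    # Entropy, as at line 317: H(a) = -sum_k p_k * log p_k, with the log
    # probabilities taken from log_softmax for numerical stability.
    entropy = -tf.reduce_sum(probs_a * tf.nn.log_softmax(logits_a), axis=-1)

    # KL(a || b), as at lines 342-343: expectation under a of the
    # difference of log-probabilities.
    delta_log_probs = tf.nn.log_softmax(logits_a) - tf.nn.log_softmax(logits_b)
    kl = tf.reduce_sum(probs_a * delta_log_probs, axis=-1)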
D | multinomial.py | 281 return math_ops.reduce_sum(counts * nn_ops.log_softmax(self.logits), -1)
|
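Line 281 of multinomial.py is the unnormalized multinomial log-likelihood, sum_k n_k * log p_k. A standalone sketch of that expression using the public API (the multinomial coefficient, which the snippet also omits, is left out here):

    import tensorflow as tf

    logits = tf.constant([1.0, 2.0, 3.0])
    counts = tf.constant([2.0, 0.0, 3.0])  # observed counts per class

    # sum_k n_k * log p_k, with log p_k computed stably via log_softmax.
    unnormalized_log_prob = tf.reduce_sum(
        counts * tf.nn.log_softmax(logits), axis=-1)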
/external/tensorflow/tensorflow/python/keras/ |
D | activations.py | 515 log_softmax = nn.log_softmax variable
|
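The activations.py hit is an alias that binds log_softmax into the Keras activation namespace. Since Keras accepts any callable as an activation, the op can also be passed directly; a small sketch with arbitrary layer widths:

    import tensorflow as tf

    # tf.nn.log_softmax used directly as the output activation.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation="relu"),
        tf.keras.layers.Dense(10, activation=tf.nn.log_softmax),
    ])
    outputs = model(tf.random.normal([4, 20]))  # rows of log-probabilities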
/external/tensorflow/tensorflow/dtensor/mlir/expansions/ |
D | softmax_spmd_expander.cc |
      159  auto log_softmax = builder.create<mlir::TF::SubOp>(    in ComputeLogSoftmax() local
      161  return log_softmax.getResult();                        in ComputeLogSoftmax()
      169  bool log_softmax) {                                    in ComputeShardedSoftmax() argument
      177  if (log_softmax) {                                     in ComputeShardedSoftmax()
      579  const mlir::Value log_softmax =                        in ExpandOp() local
      599  features_zero, log_softmax)                            in ExpandOp()
|
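The SubOp created at line 159 appears to be the final subtraction of the identity log_softmax(x) = x - logsumexp(x), which is what lets the SPMD expander assemble a sharded log-softmax from a sharded log-sum-exp; that reading is inferred from the snippet, not from the full expander. The identity itself can be checked directly:

    import tensorflow as tf

    x = tf.random.normal([4, 8])

    # log_softmax(x) == x - logsumexp(x) along the class axis.
    manual = x - tf.reduce_logsumexp(x, axis=-1, keepdims=True)
    builtin = tf.nn.log_softmax(x, axis=-1)
    tf.debugging.assert_near(manual, builtin, atol=1e-5)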
/external/tensorflow/tensorflow/lite/testing/op_tests/ |
D | log_softmax.py | 38 out = tf.nn.log_softmax(input_tensor)
|
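The op test builds a graph whose single op is tf.nn.log_softmax so the converter and kernels can be exercised against it. A rough, separate sketch of converting such a graph with the public TFLite converter (this is not the generated-test harness above, and from_concrete_functions may warn about a missing trackable object on newer releases):

    import tensorflow as tf

    @tf.function(input_signature=[tf.TensorSpec([1, 10], tf.float32)])
    def log_softmax_model(x):
      return tf.nn.log_softmax(x)

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [log_softmax_model.get_concrete_function()])
    tflite_model = converter.convert()  # serialized flatbuffer bytes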
/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/ |
D | softmax_op_test.py |
      71   tf_softmax = nn_ops.log_softmax(np_features, axis=dim, name=name)
      122  tf_log_softmax = nn_ops.log_softmax(features)
|
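The kernel test compares nn_ops.log_softmax against a NumPy reference. A self-contained version of that comparison, using the usual max-shift trick for numerical stability (shapes and tolerances chosen arbitrarily):

    import numpy as np
    import tensorflow as tf

    np_features = np.random.randn(3, 5).astype(np.float32)

    # NumPy reference: shift by the row max, then subtract the log of the
    # row-wise sum of exponentials.
    shifted = np_features - np_features.max(axis=-1, keepdims=True)
    np_log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))

    tf_log_softmax = tf.nn.log_softmax(np_features, axis=-1)
    np.testing.assert_allclose(np_log_softmax, tf_log_softmax.numpy(),
                               rtol=1e-5, atol=1e-6)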
/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_tensor_test_ops.py | 80 nn_ops.log_softmax,
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | BUILD |
      555  "reference/log_softmax.h",
      587  "reference/integer_ops/log_softmax.h",
      659  "reference/log_softmax.h",
|
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/ |
D | get-arithmetic-count.mlir | 86 %0 = "tfl.log_softmax"(%arg0) : (tensor<10x10xf32>) -> tensor<10x10xf32>
|
D | legalize-tf.mlir |
      255  func.func @log_softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      258  // CHECK-LABEL: log_softmax
      259  // CHECK: "tfl.log_softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
|
/external/tensorflow/tensorflow/python/kernel_tests/distributions/ |
D | categorical_test.py | 339 log_probabilities = nn_ops.log_softmax(logits)
|
/external/tensorflow/tensorflow/lite/testing/ |
D | generate_examples_lib.py | 108 from tensorflow.lite.testing.op_tests.log_softmax import make_log_softmax_tests
|
D | build_def.bzl | 88 "log_softmax",
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.nn.pbtxt | 220 name: "log_softmax"
|
D | tensorflow.math.pbtxt | 264 name: "log_softmax"
|
/external/tensorflow/tensorflow/python/ops/ |
D | nn_ops.py |
      3918  def log_softmax(logits, axis=None, name=None, dim=None):    function
      3943  return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
      3971  return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
|
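Lines 3918-3971 are the public tf.nn.log_softmax / tf.math.log_softmax entry points (the v1 variant still accepts the deprecated dim argument), both deferring to _wrap_2d_function around the generated kernel. Basic usage of the public signature:

    import tensorflow as tf

    x = tf.constant([[1.0, 2.0, 3.0],
                     [1.0, 1.0, 1.0]])

    row_wise = tf.nn.log_softmax(x)          # axis=None defaults to the last axis
    col_wise = tf.nn.log_softmax(x, axis=0)  # normalize down each column

    # Exponentiating the result along the chosen axis sums to 1.
    tf.debugging.assert_near(
        tf.reduce_sum(tf.exp(row_wise), axis=-1), tf.ones([2]), atol=1e-5)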
D | ctc_ops.py | 702 ilabel_log_probs = nn_ops.log_softmax(logits)
|
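In ctc_ops.py the logits are first turned into per-frame label log-probabilities before the CTC recursions run. A sketch of just that step (the time-major [frames, batch, labels] layout is an assumption here, not taken from the snippet):

    import tensorflow as tf

    # Time-major [frames, batch, num_labels] logits (layout assumed).
    logits = tf.random.normal([20, 2, 6])

    # Per-frame label log-probabilities; CTC then sums these over alignments.
    ilabel_log_probs = tf.nn.log_softmax(logits)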
D | nn_grad.py | 534 …return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits)) # pylint: disable=invalid-unar…
|
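Line 534 broadcasts -log_softmax(logits) against the incoming gradient, which matches the label gradient of softmax cross entropy (d loss / d labels = -log_softmax(logits)); that it belongs to the softmax-cross-entropy gradient registration is inferred from the snippet. The relationship can be checked with a tape:

    import tensorflow as tf

    logits = tf.constant([[2.0, 0.5, -1.0]])
    labels = tf.Variable([[0.7, 0.2, 0.1]])

    with tf.GradientTape() as tape:
      loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)

    # d(loss)/d(labels) = -log_softmax(logits), the term broadcast at line 534.
    grad_labels = tape.gradient(loss, labels)
    tf.debugging.assert_near(grad_labels, -tf.nn.log_softmax(logits), atol=1e-5)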
/external/tensorflow/tensorflow/python/ops/numpy_ops/g3doc/ |
D | TensorFlow_NumPy_Text_Generation.ipynb |
      804  …"We define the loss function from scratch, using `tf.nn.log_softmax`. (Our definition is the same …
      819  " predictions = tf.nn.log_softmax(predictions)\n",
|
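The notebook cell is truncated above, so the following is a reconstruction of the general pattern rather than the notebook's code: a from-scratch loss that applies tf.nn.log_softmax and averages the negative log-probability of the true class (the same quantity as sparse categorical crossentropy on logits):

    import tensorflow as tf

    def loss_fn(labels, predictions):
      # Raw logits -> log-probabilities, then average the negative
      # log-probability assigned to the true class.
      log_probs = tf.nn.log_softmax(predictions)
      one_hot = tf.one_hot(labels, depth=tf.shape(predictions)[-1])
      return -tf.reduce_mean(tf.reduce_sum(one_hot * log_probs, axis=-1))

    example_loss = loss_fn(tf.constant([2, 0]), tf.random.normal([2, 5]))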
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.math.pbtxt | 264 name: "log_softmax"
|
D | tensorflow.nn.pbtxt | 256 name: "log_softmax"
|
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/ |
D | optimize_patterns.td |
      901  (ArgMinMaxOp (TFL_LogSoftmaxOp:$log_softmax $logits),
      904  [(HasOneUse $log_softmax),
|
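The TableGen pattern rewrites ArgMin/ArgMax over a single-use tfl.log_softmax so the log-softmax is dropped; this is sound because log_softmax is a strictly monotonic transform along the reduced axis. A quick numerical check of that property:

    import tensorflow as tf

    x = tf.random.normal([4, 10])

    # log_softmax preserves the ordering within each row, so the argmax
    # indices are identical with or without it.
    tf.debugging.assert_equal(tf.argmax(tf.nn.log_softmax(x), axis=-1),
                              tf.argmax(x, axis=-1))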
/external/tensorflow/tensorflow/python/ops/parallel_for/ |
D | control_flow_ops_test.py |
      767  return (nn.log_softmax(logits_i), nn.log_softmax(logits_i, axis=0),
      768  nn.log_softmax(logits_i, axis=-1))
|
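The pfor test vectorizes log_softmax over a batch for several axis choices. The same behaviour is visible through the public tf.vectorized_map wrapper, which should agree with a plain Python loop (shapes arbitrary):

    import tensorflow as tf

    batch = tf.random.normal([8, 3, 5])

    vectorized = tf.vectorized_map(
        lambda t: tf.nn.log_softmax(t, axis=-1), batch)
    looped = tf.stack(
        [tf.nn.log_softmax(t, axis=-1) for t in tf.unstack(batch)])
    tf.debugging.assert_near(vectorized, looped, atol=1e-5)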
/external/tensorflow/tensorflow/compiler/tf2tensorrt/ |
D | BUILD | 681 "convert/ops/log_softmax.cc",
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | unary_ops_test.py | 369 nn_ops.log_softmax,
|