/external/tensorflow/tensorflow/contrib/keras/api/keras/activations/ |
D | __init__.py |
    22  from tensorflow.python.keras.activations import elu
    23  from tensorflow.python.keras.activations import hard_sigmoid
    24  from tensorflow.python.keras.activations import linear
    25  from tensorflow.python.keras.activations import relu
    26  from tensorflow.python.keras.activations import selu
    27  from tensorflow.python.keras.activations import sigmoid
    28  from tensorflow.python.keras.activations import softmax
    29  from tensorflow.python.keras.activations import softplus
    30  from tensorflow.python.keras.activations import softsign
    31  from tensorflow.python.keras.activations import tanh
    [all …]
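For context, a minimal sketch of calling these re-exported activation functions through the public Keras API (assumes TensorFlow 2.x eager execution; the contrib path above simply forwards to the same implementations):

```python
import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
print(tf.keras.activations.relu(x))     # clamps negatives to 0
print(tf.keras.activations.sigmoid(x))  # squashes into (0, 1)
print(tf.keras.activations.tanh(x))     # squashes into (-1, 1)
```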
|
/external/tensorflow/tensorflow/python/keras/ |
D | activations_test.py |
    43  fn = keras.activations.get(name)
    44  ref_fn = getattr(keras.activations, name)
    46  config = keras.activations.serialize(fn)
    47  fn = keras.activations.deserialize(config)
    53  fn_v2 = keras.activations.get(fn_v2_key)
    54  config = keras.activations.serialize(fn_v2)
    55  fn = keras.activations.deserialize(config)
    60  f = keras.backend.function([x], [keras.activations.softmax(x)])
    69  keras.activations.softmax(x)
    73  f = keras.backend.function([x], [keras.activations.softmax(x)])
    [all …]
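The test exercises the name-to-function round trip. A minimal sketch of that round trip, assuming tf.keras (whether `serialize` returns a bare name string or a config dict depends on the Keras version):

```python
import tensorflow as tf

fn = tf.keras.activations.get('softmax')       # resolve a string name to a callable
config = tf.keras.activations.serialize(fn)    # name string (or dict in newer Keras)
restored = tf.keras.activations.deserialize(config)
print(restored is fn)                          # the round trip recovers the same callable
```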
|
/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op_functor.h |
    34   typename TTypes<T>::Tensor activations) {
    35   activations.device(d) = features.cwiseMax(static_cast<T>(0));
    67   typename TTypes<T>::Tensor activations) {
    68   activations.device(d) =
    102  T alpha, typename TTypes<T>::Tensor activations) {
    103  activations.device(d) = features.cwiseMax(features * alpha);
    132  typename TTypes<T>::Tensor activations) {
    134  activations.device(d) =
    150  typename TTypes<T>::ConstTensor activations,
    153  (activations < static_cast<T>(0))
    [all …]
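The Eigen expressions above are elementwise: ReLU is `features.cwiseMax(0)` and the leaky variant is `features.cwiseMax(features * alpha)`. A NumPy sketch of the same math:

```python
import numpy as np

def relu(features):
    # ReLU: max(x, 0), mirroring features.cwiseMax(static_cast<T>(0))
    return np.maximum(features, 0.0)

def leaky_relu(features, alpha=0.2):
    # Leaky ReLU: max(x, alpha * x), which equals the usual piecewise
    # definition for 0 <= alpha < 1, mirroring features.cwiseMax(features * alpha)
    return np.maximum(features, alpha * features)

x = np.array([-3.0, -0.5, 0.0, 2.0])
print(relu(x))        # [0.  0.  0.  2.]
print(leaky_relu(x))  # [-0.6 -0.1  0.   2. ]
```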
|
D | relu_op.cc |
    84   typename TTypes<T>::Tensor activations); \
    97   typename TTypes<T>::Tensor activations); \
    110  typename TTypes<T>::Tensor activations); \
    123  typename TTypes<T>::Tensor activations); \
    129  typename TTypes<T>::ConstTensor activations, \
    136  typename TTypes<T>::Tensor activations); \
    142  typename TTypes<T>::ConstTensor activations, \
    149  typename TTypes<qint8>::Tensor activations);
|
D | softsign_op.h |
    35  typename TTypes<T>::Tensor activations) {
    36  activations.device(d) =
|
D | softplus_op.h |
    35  typename TTypes<T>::Tensor activations) {
    50  activations.device(d) = too_large.select(
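The `too_large.select(...)` fragment is the numerically stable branch of softplus: `log(1 + exp(x))` overflows for large `x`, where softplus is essentially `x` itself. A NumPy sketch of that idea, with an illustrative threshold rather than the kernel's exact cutoff (softsign, from the neighboring kernel, is `x / (1 + |x|)`):

```python
import numpy as np

def softplus(x, threshold=30.0):
    # For large x, softplus(x) ~= x, so select the identity branch instead of
    # evaluating log1p(exp(x)) -- the same idea as too_large.select above.
    x = np.asarray(x, dtype=np.float64)
    return np.where(x > threshold, x, np.log1p(np.exp(np.minimum(x, threshold))))

def softsign(x):
    # softsign_op.h computes x / (1 + |x|) elementwise.
    return x / (1.0 + np.abs(x))

print(softplus(np.array([-50.0, 0.0, 50.0])))  # [~0, log(2), 50]
print(softsign(np.array([-4.0, 0.0, 4.0])))    # [-0.8, 0, 0.8]
```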
|
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/ |
D | rnn_common.py |
    182  def select_last_activations(activations, sequence_lengths):
    196  'select_last_activations', values=[activations, sequence_lengths]):
    197  activations_shape = array_ops.shape(activations)
    203  reshaped_activations = array_ops.reshape(activations,
    208  [activations.get_shape()[0], activations.get_shape()[2]])
    212  def mask_activations_and_labels(activations, labels, sequence_lengths):
    232  values=[activations, labels, sequence_lengths]):
    238  activations_masked = array_ops.reshape(activations,
    243  activations_masked = array_ops.boolean_mask(activations, mask)
    248  def multi_value_predictions(activations, target_column, problem_type,
    [all …]
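A NumPy sketch of what `select_last_activations` does: from a padded `[batch, time, depth]` tensor, gather the activation at each sequence's last valid step (the indexing here is illustrative, not the TF graph-op implementation above):

```python
import numpy as np

def select_last_activations(activations, sequence_lengths):
    # Pick activations[i, sequence_lengths[i] - 1, :] for every batch row i.
    batch = np.arange(activations.shape[0])
    return activations[batch, np.asarray(sequence_lengths) - 1, :]

acts = np.random.rand(3, 5, 4)                   # batch=3, padded_length=5, depth=4
last = select_last_activations(acts, [5, 2, 3])
print(last.shape)                                # (3, 4)
```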
|
D | rnn_common_test.py |
    39   activations = np.random.rand(batch_size, padded_length, num_classes)
    43   constant_op.constant(activations, dtype=dtypes.float32),
    65   expected_activations = activations[i, j, :]
    90   activations = np.random.rand(batch_size, padded_length, num_classes)
    92   constant_op.constant(activations, dtype=dtypes.float32),
    106  expected_activations = activations[i, sequence_length[i] - 1, :]
|
D | dynamic_rnn_estimator.py |
    252  activations = layers.fully_connected(
    257  return activations, final_state
    260  def _single_value_predictions(activations,
    290  activations, sequence_length)
    308  activations, labels, sequence_length, target_column, features):
    326  activations, labels, sequence_length)
    331  activations, labels, sequence_length, target_column, features):
    350  activations, sequence_length)
|
D | state_saving_rnn_estimator.py |
    81   activations = layers.fully_connected(
    89   return activations, final_state
    93   activations, labels, sequence_length, target_column, features):
    111  activations, labels, sequence_length)
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | activations.cc |
    38   namespace activations {
    967  activations::GenericPrepare,
    968  activations::EluEval};
    974  activations::GenericPrepare,
    975  activations::ReluEval};
    981  activations::GenericPrepare,
    982  activations::Relu1Eval};
    988  activations::GenericPrepare,
    989  activations::Relu6Eval};
    995  activations::Init, activations::Free, activations::TanhPrepare,
    [all …]
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_embedding_gradient.py |
    30   def get_gradients_through_compute_gradients(optimizer, loss, activations):
    43   activation_list = activations.values()
    47   zip(activations.keys(), grads))
    101  def hook_dummy_table_variables_to_activations(tpu_embedding, activations,
    117  for feature in activations:
    121  activations[feature],
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | cudnn_conv_rewriter_test.cc |
    100  HloInstruction* activations =
    111  activations->shape(), gradients->shape(), /*feature_group_count=*/1,
    115  activations, gradients, /*feature_group_count=*/1,
    141  HloInstruction* activations =
    151  activations->shape(), gradients->shape(), /*feature_group_count=*/1,
    155  activations, gradients, /*feature_group_count=*/1,
    171  HloInstruction* activations =
    185  ShapeUtil::MakeShape(F32, {32, 3, 3, 32}), activations, gradients,
    201  HloInstruction* activations =
    215  ShapeUtil::MakeShape(F32, {320, 3, 3, 192}), activations, gradients,
    [all …]
|
/external/tensorflow/tensorflow/lite/experimental/micro/kernels/ |
D | softmax.cc |
    28   namespace activations {
    205  static TfLiteRegistration r = {activations::Init, activations::Free,
    206  activations::SoftmaxPrepare,
    207  activations::SoftmaxEval};
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_TPUEmbeddingActivations.pbtxt |
    13  The embedding activations Tensor to return.
    20  these activations were computed.
    27  activations.
|
D | api_def_RecvTPUEmbeddingActivations.pbtxt |
    7   A TensorList of embedding activations containing one Tensor per
    24  summary: "An op that receives embedding activations on the TPU."
    30  one Tensor of activations per table specified in the model. There can be at
|
/external/tensorflow/tensorflow/lite/tools/optimize/testdata/ |
D | README.md |
    11  All activations have recorded min/max values, and the activations are in the range [0, 10].
    16  as 127. The activations are all in the range [-128, 127].
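A NumPy sketch of the symmetric weight-quantization scheme the README describes, mapping the largest-magnitude weight to 127 (the testdata generator's exact rounding may differ):

```python
import numpy as np

def quantize_weights_symmetric(w):
    # Choose the scale so the largest absolute weight maps to 127,
    # then round to int8 values in [-127, 127].
    scale = np.max(np.abs(w)) / 127.0
    q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
    return q, scale

w = np.random.uniform(-1.0, 1.0, size=(4, 4)).astype(np.float32)
q, scale = quantize_weights_symmetric(w)
print(q.min(), q.max(), scale)  # int8 values spanning up to [-127, 127]
```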
|
/external/tensorflow/tensorflow/contrib/gan/python/eval/python/ |
D | classifier_metrics_impl.py |
    270  activations = run_image_classifier(images, graph_def, input_tensor,
    272  if isinstance(activations, list):
    273  for i, activation in enumerate(activations):
    275  activations[i] = layers.flatten(activation)
    277  if array_ops.rank(activations) != 2:
    278  activations = layers.flatten(activations)
    280  return activations
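A NumPy sketch of the flattening step above: classifier activations of any rank are reshaped to `[batch, features]` before the metric is computed:

```python
import numpy as np

def flatten_activations(activations):
    # Keep the batch dimension, collapse everything else into one axis.
    batch = activations.shape[0]
    return activations.reshape(batch, -1)

acts = np.random.rand(8, 7, 7, 2048)    # e.g. a conv feature map
print(flatten_activations(acts).shape)  # (8, 100352)
```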
|
/external/tensorflow/tensorflow/lite/g3doc/performance/ |
D | model_optimization.md |
    37  representations of weights and, optionally, activations for both storage and
    41  * Quantization of activations reduces memory access costs for reading and storing intermediate acti…
    46  * [Post-training quantization](post_training_quantization.md) quantizes weights and activations pos…
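A minimal sketch of post-training quantization through the TFLite converter; the SavedModel path here is a placeholder, and post_training_quantization.md is the authoritative recipe:

```python
import tensorflow as tf

# 'my_saved_model' is an illustrative path, not from the source.
converter = tf.lite.TFLiteConverter.from_saved_model('my_saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # enables weight quantization
tflite_model = converter.convert()

with open('model_quant.tflite', 'wb') as f:
    f.write(tflite_model)
```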
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | batch_norm_op.cc |
    135  auto activations =
    166  xla::BatchNormGrad(activations, scale, mean, var, grad_backprop,
    198  xla::Mul(grad_backprop, xla::Sub(activations, mean, {feature_index}));
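The `Mul(grad_backprop, Sub(activations, mean))` term above is the building block of the scale gradient in batch-norm backprop. A hedged NumPy sketch of that piece, assuming NHWC layout (this is the textbook formula, not necessarily the exact code path):

```python
import numpy as np

def batch_norm_scale_grad(activations, grad_backprop, mean, var, eps=1e-3):
    # d(loss)/d(scale) = sum over N,H,W of grad * (x - mean) / sqrt(var + eps);
    # the product below is the Mul/Sub from the snippet.
    centered = grad_backprop * (activations - mean)
    return centered.sum(axis=(0, 1, 2)) / np.sqrt(var + eps)

x = np.random.rand(2, 4, 4, 3)
g = np.random.rand(2, 4, 4, 3)
mean, var = x.mean(axis=(0, 1, 2)), x.var(axis=(0, 1, 2))
print(batch_norm_scale_grad(x, g, mean, var).shape)  # (3,), one value per feature
```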
|
D | conv_op_helpers.cc |
    395  StringPiece type_string, xla::XlaOp activations,
    400  auto* builder = activations.builder();
    402  builder->GetShape(activations));
    533  activations, gradients, window_strides, padding, /*lhs_dilation=*/ones,
    540  filter_shape, filter_backprop, activations.builder());
|
/external/tensorflow/tensorflow/examples/tutorials/mnist/ |
D | mnist_with_summaries.py |
    97  activations = act(preactivate, name='activation')
    98  tf.summary.histogram('activations', activations)
    99  return activations
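A condensed sketch of the layer helper around the snippet (TF1-style graph code, as in the tutorial; variable shapes and initializers are illustrative):

```python
import tensorflow.compat.v1 as tf

def nn_layer(input_tensor, input_dim, output_dim, act=tf.nn.relu):
    # Dense layer: pre-activation, then the activation function passed as `act`,
    # with a histogram summary of the resulting activations for TensorBoard.
    weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
    biases = tf.Variable(tf.constant(0.1, shape=[output_dim]))
    preactivate = tf.matmul(input_tensor, weights) + biases
    activations = act(preactivate, name='activation')
    tf.summary.histogram('activations', activations)
    return activations
```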
|
/external/tensorflow/tensorflow/python/keras/layers/ |
D | local.py |
    21   from tensorflow.python.keras import activations
    146  self.activation = activations.get(activation)
    272  activations.serialize(self.activation),
    421  self.activation = activations.get(activation)
    560  activations.serialize(self.activation),
|
/external/gemmlowp/doc/ |
D | public.md |
    120  multiplication), while the RHS and result are neural network activations,
    121  respectively the input and output activations of the layer.
    123  Because the RHS and result are activations, we want them to share the same
    124  storage order -- so that one layer's output activations can be readily used as
    125  the next layer's input activations. Thus, we focus on `RhsOrder=ResultOrder`.
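A NumPy sketch of the point being made: when the result of one multiplication and the RHS of the next share a layout, the output buffer feeds the next layer without a transpose or copy (shapes and weights are illustrative):

```python
import numpy as np

w1 = np.random.rand(16, 8)   # LHS: layer-1 weights
w2 = np.random.rand(4, 16)   # LHS: layer-2 weights
x = np.random.rand(8, 32)    # RHS: input activations, one column per sample

h = w1 @ x   # result activations, stored in the same layout as x
y = w2 @ h   # h is reused directly as the next layer's RHS -- no repacking
print(y.shape)  # (4, 32)
```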
|
/external/tensorflow/tensorflow/core/protobuf/tpu/ |
D | tpu_embedding_configuration.proto |
    35  // Number of samples in each batch of embedding layer activations sent to
    73  // that the activations on every step observe the gradient updates from the
    83  // is complete. The drawback is that embedding activations for step N+1 do not
|