
Searched refs:activation (Results 1 – 25 of 303) sorted by relevance


/external/tensorflow/tensorflow/lite/c/
builtin_op_data.h
76 TfLiteFusedActivation activation; member
85 TfLiteFusedActivation activation; member
97 TfLiteFusedActivation activation; member
105 TfLiteFusedActivation activation; member
109 TfLiteFusedActivation activation; member
114 TfLiteFusedActivation activation; member
119 TfLiteFusedActivation activation; member
130 TfLiteFusedActivation activation; member
152 TfLiteFusedActivation activation; member
156 TfLiteFusedActivation activation; member
[all …]
/external/tensorflow/tensorflow/python/layers/
convolutional.py
86 activation=None, argument
105 activation=activation,
129 activation=None, argument
205 activation=activation,
285 activation=None, argument
304 activation=activation,
328 activation=None, argument
411 activation=activation,
492 activation=None, argument
511 activation=activation,
[all …]
core.py
86 activation=None, argument
99 activation=activation,
119 activation=None, argument
175 activation=activation,
/external/tensorflow/tensorflow/lite/kernels/
kernel_util.cc
42 const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, in PopulateConvolutionQuantizationParams() argument
100 CalculateActivationRangeUint8(activation, output, output_activation_min, in PopulateConvolutionQuantizationParams()
128 void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation, in CalculateActivationRangeQuantizedImpl() argument
139 if (activation == kTfLiteActRelu) { in CalculateActivationRangeQuantizedImpl()
142 } else if (activation == kTfLiteActRelu6) { in CalculateActivationRangeQuantizedImpl()
145 } else if (activation == kTfLiteActRelu1) { in CalculateActivationRangeQuantizedImpl()
156 TfLiteFusedActivation activation, in CalculateActivationRangeQuantized() argument
175 CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, in CalculateActivationRangeQuantized()
180 void CalculateActivationRangeUint8(TfLiteFusedActivation activation, in CalculateActivationRangeUint8() argument
186 CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, in CalculateActivationRangeUint8()
[all …]
kernel_util.h
91 const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
112 TfLiteFusedActivation activation,
116 void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
119 void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
125 void CalculateActivationRange(TfLiteFusedActivation activation, in CalculateActivationRange() argument
127 if (activation == kTfLiteActRelu) { in CalculateActivationRange()
130 } else if (activation == kTfLiteActRelu6) { in CalculateActivationRange()
133 } else if (activation == kTfLiteActRelu1) { in CalculateActivationRange()
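The kernel_util.h snippet above is the inline path that turns a fused activation into clamp bounds for the kernel output. A minimal Python sketch of that dispatch (the bounds mirror the kTfLiteActRelu / Relu6 / Relu1 cases visible above; everything else is an assumption of the sketch, not the TFLite API):

    import math

    def calculate_activation_range(activation: str):
        """Return (act_min, act_max) clamp bounds for a fused activation."""
        if activation == "relu":            # kTfLiteActRelu
            return 0.0, math.inf
        elif activation == "relu6":         # kTfLiteActRelu6
            return 0.0, 6.0
        elif activation == "relu_n1_to_1":  # kTfLiteActRelu1
            return -1.0, 1.0
        return -math.inf, math.inf          # kTfLiteActNone: no clamping

    def apply_fused_activation(x, activation):
        lo, hi = calculate_activation_range(activation)
        return min(max(x, lo), hi)

Kernels then clamp each output element to [act_min, act_max] instead of running a separate activation op.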
/external/tensorflow/tensorflow/lite/core/api/
flatbuffer_conversions.cc
112 auto parse_activation = [](ActivationFunctionType activation) { in ParseOpData() argument
113 switch (activation) { in ParseOpData()
159 params->activation = in ParseOpData()
204 params->activation = in ParseOpData()
218 params->activation = in ParseOpData()
231 params->activation = in ParseOpData()
241 params->activation = in ParseOpData()
253 params->activation = parse_activation( in ParseOpData()
264 params->activation = in ParseOpData()
285 params->activation = parse_activation( in ParseOpData()
[all …]
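parse_activation above is the schema-to-runtime translation: the flatbuffer ActivationFunctionType stored in the model is converted to the interpreter's TfLiteFusedActivation before being written into params->activation. A rough sketch of that switch as a Python mapping (illustrative only, not the TFLite API):

    # Sketch of the ActivationFunctionType -> TfLiteFusedActivation switch.
    FLATBUFFER_TO_TFLITE_ACTIVATION = {
        "NONE": "kTfLiteActNone",
        "RELU": "kTfLiteActRelu",
        "RELU_N1_TO_1": "kTfLiteActRelu1",
        "RELU6": "kTfLiteActRelu6",
        "TANH": "kTfLiteActTanh",
        "SIGN_BIT": "kTfLiteActSignBit",
    }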
/external/tensorflow/tensorflow/python/keras/layers/
recurrent_v2.py
151 activation='tanh', argument
181 activation=activation,
207 activation == 'tanh' and recurrent_activation == 'sigmoid' and
293 activation=self.activation,
310 activation=self.activation,
321 def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, activation, argument
376 hh = activation(x_h + r * recurrent_h)
520 activation='tanh', argument
550 activation=activation,
579 activation == 'tanh' and recurrent_activation == 'sigmoid' and
[all …]
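The `activation == 'tanh' and recurrent_activation == 'sigmoid'` check above is part of the condition that decides whether tf.keras GRU/LSTM can use the fused cuDNN kernel (there are further requirements, such as zero recurrent dropout, that the snippet does not show). A minimal example of the practical effect:

    import tensorflow as tf

    fast_gru = tf.keras.layers.GRU(64)                     # defaults satisfy the check; cuDNN-eligible on GPU
    slow_gru = tf.keras.layers.GRU(64, activation='relu')  # fails the check; generic RNN kernel is used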
convolutional.py
106 activation=None, argument
136 self.activation = activations.get(activation)
209 if self.activation is not None:
210 return self.activation(outputs)
250 'activation': activations.serialize(self.activation),
341 activation=None, argument
359 activation=activations.get(activation),
457 activation=None, argument
475 activation=activations.get(activation),
575 activation=None, argument
[all …]
recurrent.py
1171 activation='tanh', argument
1187 self.activation = activations.get(activation)
1248 if self.activation is not None:
1249 output = self.activation(output)
1261 activations.serialize(self.activation),
1357 activation='tanh', argument
1384 activation=activation,
1419 def activation(self): member in SimpleRNN
1420 return self.cell.activation
1475 activations.serialize(self.activation),
[all …]
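In the recurrent.py snippet, SimpleRNN exposes `activation` as a read-only property that simply forwards to its cell, which holds the callable obtained via activations.get(). For example:

    import tensorflow as tf

    rnn = tf.keras.layers.SimpleRNN(8, activation='tanh')
    assert rnn.activation is rnn.cell.activation  # the layer property delegates to the cell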
/external/tensorflow/tensorflow/contrib/model_pruning/python/layers/
core_layers.py
94 activation=None, argument
117 self.activation = activation
210 if self.activation is not None:
211 return self.activation(outputs)
300 activation=None, argument
318 activation=activation,
375 activation=None, argument
391 self.activation = activation
467 if self.activation is not None:
468 return self.activation(outputs) # pylint: disable=not-callable
/external/tensorflow/tensorflow/python/keras/
testing_utils.py
291 model.add(keras.layers.Dense(num_hidden, activation='relu',
294 model.add(keras.layers.Dense(num_hidden, activation='relu'))
295 activation = 'sigmoid' if num_classes == 1 else 'softmax'
296 model.add(keras.layers.Dense(num_classes, activation=activation))
302 outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
303 activation = 'sigmoid' if num_classes == 1 else 'softmax'
304 outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
313 self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
314 activation = 'sigmoid' if num_classes == 1 else 'softmax'
315 self.layer_b = keras.layers.Dense(num_classes, activation=activation)
[all …]
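The testing_utils.py lines choose the output activation from the number of classes: 'sigmoid' for a single (binary) unit, 'softmax' otherwise. A minimal sketch of the same pattern (the function name is illustrative):

    import tensorflow as tf

    def small_classifier(num_hidden, num_classes):
        activation = 'sigmoid' if num_classes == 1 else 'softmax'
        return tf.keras.Sequential([
            tf.keras.layers.Dense(num_hidden, activation='relu'),
            tf.keras.layers.Dense(num_classes, activation=activation),
        ])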
integration_test.py
48 [keras.layers.Dense(16, activation='relu'),
50 keras.layers.Dense(y_train.shape[-1], activation='softmax')],
79 activation='relu',
86 y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
122 keras.layers.GRU(y_train.shape[-1], activation='softmax')
153 activation='softmax',
184 keras.layers.Conv2D(4, 3, padding='same', activation='relu'),
189 keras.layers.Dense(y_train.shape[-1], activation='softmax')
228 keras.layers.Dense(10, activation=nn.relu),
230 keras.layers.Dense(y_train.shape[-1], activation=nn.softmax_v2),
activations.py
238 def serialize(activation): argument
239 if activation.__name__ in _TF_ACTIVATIONS_V2:
240 return _TF_ACTIVATIONS_V2[activation.__name__]
241 return activation.__name__
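serialize() above is the callable-to-name half of the keras.activations round trip (with a remapping table for the v2 TF ops). Typical usage of the public API, assuming the TF version indexed here:

    from tensorflow.keras import activations

    relu_fn = activations.get('relu')        # name -> callable
    name = activations.serialize(relu_fn)    # callable -> 'relu'
    linear_fn = activations.get(None)        # None -> the linear (identity) activation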
/external/tensorflow/tensorflow/contrib/quantize/python/
quantize_parameterized_test.py
164 def _TestQuantize_Conv2dWithoutBatchNorm(self, activation, activation_op_name, argument
186 activation_fn = None if with_bypass else activation
200 node = activation(node, name=scope + delim + activation_op_name)
218 def _TestQuantize_FCWithoutBatchNorm(self, activation, activation_op_name, argument
238 activation_fn = None if with_bypass else activation
250 node = activation(node, name=scope + delim + activation_op_name)
265 self, activation, activation_op_name, with_bypass, delay, use_resource, argument
285 activation_fn = None if with_bypass else activation
301 node = activation(node, name=scope + delim + activation_op_name)
315 def _TestQuantize_AtrousConvWithoutBatchNorm(self, activation, argument
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/
convolution_group_converter.cc
203 auto activation = convolution->mutable_operand(0); in HandleBatchGroupCount() local
222 int64 input_batch = activation->shape().dimensions(input_batch_dimension); in HandleBatchGroupCount()
241 expanded_filter_shape, activation, filter, in HandleBatchGroupCount()
305 convert_back_shape.set_element_type(activation->shape().element_type()); in HandleBatchGroupCount()
403 auto activation = convolution->mutable_operand(0); in HandleConvolution() local
406 Shape reshaped_activation_shape = activation->shape(); in HandleConvolution()
413 activation = add( in HandleConvolution()
414 HloInstruction::CreateReshape(reshaped_activation_shape, activation)); in HandleConvolution()
443 new_output_shape, activation, filter, group_count, in HandleConvolution()
475 auto activation = convolution->mutable_operand(0); in HandleConvolution() local
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/
kernel_utils.cc
26 TfLiteFusedActivation activation, in RnnBatchStep() argument
32 batch_size, output_batch_leading_dim, activation, in RnnBatchStep()
42 TfLiteFusedActivation activation, in RnnBatchStep() argument
70 output_ptr_batch, num_units * batch_size, activation, output_ptr_batch); in RnnBatchStep()
112 activation, output_ptr_batch + k * output_batch_leading_dim); in RnnBatchStep()
125 TfLiteFusedActivation activation, int8_t* quantized_input_ptr_batch, in RnnBatchStep() argument
134 output_batch_leading_dim, activation, quantized_input_ptr_batch, in RnnBatchStep()
147 TfLiteFusedActivation activation, int8_t* quantized_input_ptr_batch, in RnnBatchStep() argument
223 output_ptr_batch, num_units * batch_size, activation, output_ptr_batch); in RnnBatchStep()
312 activation, output_ptr_batch + k * output_batch_leading_dim); in RnnBatchStep()
kernel_utils.h
43 TfLiteFusedActivation activation,
53 TfLiteFusedActivation activation,
71 TfLiteFusedActivation activation, int8_t* quantized_input_ptr_batch,
82 TfLiteFusedActivation activation, int8_t* quantized_input_ptr_batch,
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
elu_op.cc
52 const auto activation = ctx->Input(1); in Compile() local
53 const auto exp_grad = xla::Mul(grad, xla::Add(activation, one)); in Compile()
54 const auto pred = xla::Gt(activation, zero); in Compile()
93 const auto activation = ctx->Input(1); in Compile() local
95 const auto exp_grad = xla::Mul(grad, xla::Add(activation, scale_alpha)); in Compile()
96 const auto pred = xla::Gt(activation, zero); in Compile()
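The elu_op.cc gradient kernels rely on the identity that, for x <= 0, elu(x) = exp(x) - 1, so d(elu)/dx = exp(x) = activation + 1; the backward pass can therefore be written purely in terms of the saved activation. A NumPy sketch of the EluGrad rule shown above:

    import numpy as np

    def elu_grad(grad, activation):
        # pass grad through where the activation was positive,
        # otherwise scale by (activation + 1) == exp(x)
        return np.where(activation > 0, grad, grad * (activation + 1.0))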
/external/tensorflow/tensorflow/python/keras/engine/
training_gpu_test.py
45 activation = None
50 activation = 'softmax'
55 activation = 'softmax'
59 activation = 'sigmoid'
62 activation=activation,
/external/autotest/client/site_tests/cellular_ActivateCDMA/
control.pseudomodem
9 activation process when shill accesses the modem via ModemManager.
24 activation command to ModemManager and keeps retrying in the case of failure.
25 This test verifies that the activation state exposed by ModemManager is
26 reflected correctly in shill and that activation retries are repeatedly
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/
model_utils.py
37 activation=nn_ops.relu, argument
47 if activation is not None:
48 assert callable(activation)
49 output = activation(output)
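The model_utils.py lines show the common "optional activation" pattern: the layer output is passed through `activation` only when one is supplied, and the argument must be callable. A minimal sketch under those assumptions (the function name is illustrative, not the contrib API):

    import tensorflow as tf

    def dense_with_optional_activation(inputs, units, activation=tf.nn.relu):
        output = tf.keras.layers.Dense(units)(inputs)
        if activation is not None:
            assert callable(activation)
            output = activation(output)
        return output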
/external/tensorflow/tensorflow/contrib/lite/
nnapi_delegate.cc
312 auto check_and_add_activation = [&add_scalar_int32](int activation) { in AddOpsAndParams() argument
313 if (activation > kTfLiteActRelu6) { in AddOpsAndParams()
317 add_scalar_int32(activation); in AddOpsAndParams()
323 if (builtin->activation > kTfLiteActRelu6) { in AddOpsAndParams()
327 add_scalar_int32(builtin->activation); in AddOpsAndParams()
339 return check_and_add_activation(builtin->activation); in AddOpsAndParams()
348 return check_and_add_activation(builtin->activation); in AddOpsAndParams()
358 return check_and_add_activation(builtin->activation); in AddOpsAndParams()
363 return check_and_add_activation(builtin->activation); in AddOpsAndParams()
369 if (builtin->activation != kTfLiteActNone) { in AddOpsAndParams()
[all …]
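check_and_add_activation above reflects an NNAPI limitation: only the fused activations NONE, RELU, RELU1 and RELU6 can be encoded, so any TfLiteFusedActivation value past kTfLiteActRelu6 makes the delegate bail out; otherwise the activation is appended as a scalar int32 operand. An illustrative Python mirror (enum values follow the TfLiteFusedActivation ordering; this is a sketch, not the delegate API):

    K_TFLITE_ACT_NONE, K_TFLITE_ACT_RELU, K_TFLITE_ACT_RELU1, K_TFLITE_ACT_RELU6 = range(4)

    def check_and_add_activation(activation, operands):
        if activation > K_TFLITE_ACT_RELU6:
            raise ValueError("activation cannot be fused by the NNAPI delegate")
        operands.append(activation)  # added as a scalar int32 operand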
/external/tensorflow/tensorflow/stream_executor/cuda/
cuda_driver.cc
516 ScopedActivateContext activation(context); in ContextGetSharedMemConfig() local
533 ScopedActivateContext activation(context); in ContextSetSharedMemConfig() local
555 ScopedActivateContext activation(context); in LaunchKernel() local
575 ScopedActivateContext activation(context); in LoadCubin() local
592 ScopedActivateContext activation(context); in LoadPtx() local
665 ScopedActivateContext activation(context); in SynchronousMemsetUint8() local
678 ScopedActivateContext activation(context); in SynchronousMemsetUint32() local
692 ScopedActivateContext activation(context); in AsynchronousMemsetUint8() local
707 ScopedActivateContext activation(context); in AsynchronousMemsetUint32() local
848 ScopedActivateContext activation(context); in DeviceDeallocate() local
[all …]
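Every cuda_driver.cc entry above wraps a driver call in ScopedActivateContext, an RAII guard that makes the given context current for the enclosing scope and restores the previous one when the guard is destroyed. A rough Python analogue of that push/pop pattern (illustrative only, not the StreamExecutor API):

    from contextlib import contextmanager

    _current_context = None  # stand-in for the driver's notion of "current context"

    @contextmanager
    def scoped_activate_context(context):
        global _current_context
        previous, _current_context = _current_context, context  # push: make `context` current
        try:
            yield context
        finally:
            _current_context = previous                         # pop: restore the previous context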
/external/tensorflow/tensorflow/contrib/eager/python/examples/gan/
mnist.py
60 64, 5, padding='SAME', data_format=data_format, activation=tf.tanh)
63 128, 5, data_format=data_format, activation=tf.tanh)
66 self.fc1 = layers.Dense(1024, activation=tf.tanh)
67 self.fc2 = layers.Dense(1, activation=None)
115 self.fc1 = layers.Dense(6 * 6 * 128, activation=tf.tanh)
121 64, 4, strides=2, activation=None, data_format=data_format)
125 1, 2, strides=2, activation=tf.nn.sigmoid, data_format=data_format)
/external/tensorflow/tensorflow/stream_executor/rocm/
rocm_driver.cc
404 ScopedActivateContext activation{context}; in ContextGetSharedMemConfig() local
420 ScopedActivateContext activation{context}; in ContextSetSharedMemConfig() local
441 ScopedActivateContext activation{context}; in LaunchKernel() local
479 ScopedActivateContext activation{context}; in LoadHsaco() local
502 ScopedActivateContext activation{context}; in SynchronousMemsetUint8() local
515 ScopedActivateContext activation{context}; in SynchronousMemsetUint32() local
530 ScopedActivateContext activation{context}; in AsynchronousMemsetUint8() local
546 ScopedActivateContext activation{context}; in AsynchronousMemsetUint32() local
672 ScopedActivateContext activation{context}; in DeviceDeallocate() local
700 ScopedActivateContext activation{context}; in HostAllocate() local
[all …]
