
Searched refs:flat_input (Results 1 – 6 of 6) sorted by relevance

/external/tensorflow/tensorflow/lite/experimental/examples/lstm/
rnn.py 210 flat_input = nest.flatten(inputs)
214 flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
215 flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
228 batch_size = _best_effort_input_batch_size(flat_input)
257 inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
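These hits are the usual input-canonicalization pattern in dynamic RNN code: flatten the (possibly nested) inputs, convert each leaf to a tensor, transpose batch and time, then repack. A minimal sketch of the same pattern with the public tf.nest API (the example inputs and shapes are made up; _transpose_batch_time is internal, so a plain tf.transpose stands in for it):

```python
import tensorflow as tf

# Hypothetical nested input: a tuple of two [batch, time, depth] tensors.
inputs = (tf.zeros([8, 5, 3]), tf.zeros([8, 5, 2]))

# Flatten the arbitrary nesting into a flat list of leaf tensors.
flat_input = tf.nest.flatten(inputs)
flat_input = [tf.convert_to_tensor(t) for t in flat_input]

# Swap batch and time so the time loop runs over the leading axis
# (standing in for the internal _transpose_batch_time helper).
flat_input = [tf.transpose(t, perm=[1, 0, 2]) for t in flat_input]

# Repack the flat list into the original nested structure.
inputs = tf.nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
```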
/external/tensorflow/tensorflow/python/ops/
rnn.py 72 def _best_effort_input_batch_size(flat_input): argument
86 for input_ in flat_input:
97 return array_ops.shape(flat_input[0])[1]
617 flat_input = nest.flatten(inputs)
621 flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
622 flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
634 batch_size = _best_effort_input_batch_size(flat_input)
662 inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
729 flat_input = nest.flatten(inputs)
733 input_shape = array_ops.shape(flat_input[0])
[all …]
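The helper at line 72 scans the flattened inputs for a statically known batch dimension and only falls back to a dynamic shape op when none is found; by this point the tensors are time-major, so the batch size sits in dimension 1. A rough sketch of that logic (the function body is paraphrased from the snippets above, not copied from the source):

```python
import tensorflow as tf

def best_effort_input_batch_size(flat_input):
  # Each tensor is assumed time-major: [max_time, batch_size, ...].
  for input_ in flat_input:
    shape = input_.shape
    if shape.rank is None or shape.rank < 2:
      continue
    batch = shape[1]
    if batch is not None:
      return batch  # static batch size found
  # No static batch size anywhere: fall back to a runtime shape op,
  # mirroring array_ops.shape(flat_input[0])[1] in the result above.
  return tf.shape(flat_input[0])[1]
```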
/external/tensorflow/tensorflow/compiler/tf2xla/
fused_batchnorm_reserve_space_test.cc 113 auto flat_input = input_data.flat<float>(); in TEST() local
114 for (int i = 0; i < flat_input.size(); i++) { in TEST()
115 flat_input.data()[i] = (i - 5) / 1000.0f; in TEST()
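In this test, Tensor::flat<float>() exposes the tensor as a 1-D view so the loop can fill it with deterministic values regardless of its shape. A NumPy analogue of the same idea (the array shape is arbitrary; reshape(-1) on a contiguous array returns a view, so the writes land in the original buffer):

```python
import numpy as np

input_data = np.zeros((2, 3, 4), dtype=np.float32)
flat_input = input_data.reshape(-1)      # 1-D view over the same buffer
for i in range(flat_input.size):
  flat_input[i] = (i - 5) / 1000.0       # same fill pattern as the test
```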
/external/tensorflow/tensorflow/core/kernels/
quantization_utils.h 732 auto flat_input = input.flat<float>(); in FloatTensorToQuantizedInPlaceUsingEigen() local
734 DCHECK_EQ(flat_input.size(), flat_result.size()); in FloatTensorToQuantizedInPlaceUsingEigen()
737 flat_result.device(device) = QUANTIZE_WITH_EIGEN(flat_input, f2q, T); in FloatTensorToQuantizedInPlaceUsingEigen()
744 auto flat_input = input.flat<float>(); in FloatTensorToQuantizedInPlace() local
746 const int data_size = flat_input.size(); in FloatTensorToQuantizedInPlace()
749 flat_result(i) = FloatToQuantized<T>(flat_input(i), min, max); in FloatTensorToQuantizedInPlace()
766 auto flat_input = input.flat<T>(); in QuantizedTensorToFloatInPlaceUsingEigen() local
768 const int data_size = flat_input.size(); in QuantizedTensorToFloatInPlaceUsingEigen()
772 flat_result.device(device) = DEQUANTIZE_WITH_EIGEN(flat_input, q2f); in QuantizedTensorToFloatInPlaceUsingEigen()
780 auto flat_input = input.flat<T>(); in QuantizedTensorToFloatInPlace() local
[all …]
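quantization_utils.h uses the flat views for element-wise float-to-quantized conversion, either through Eigen expressions or a scalar loop. A simplified affine-quantization sketch in NumPy that captures the idea of the scalar path (the real FloatToQuantized applies a slightly different range adjustment, so treat this as an approximation rather than the exact formula):

```python
import numpy as np

def float_to_quantized(values, range_min, range_max, dtype=np.uint8):
  # Map floats in [range_min, range_max] linearly onto the integer
  # range of `dtype`, one element at a time over the flat input.
  info = np.iinfo(dtype)
  scale = (info.max - info.min) / (range_max - range_min)
  q = np.round((values - range_min) * scale) + info.min
  return np.clip(q, info.min, info.max).astype(dtype)

flat_input = np.linspace(-1.0, 1.0, 8, dtype=np.float32)
flat_result = float_to_quantized(flat_input, -1.0, 1.0)
```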
relu_op.cc 198 auto flat_input = input.flat<qint8>(); in Operate() local
199 OP_REQUIRES(context, (flat_input.size() % 4) == 0, in Operate()
202 flat_input.size())); in Operate()
204 func(context->eigen_device<Device>(), flat_input, output->flat<qint8>()); in Operate()
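The qint8 Relu kernel requires the flat input length to be divisible by four, presumably because its vectorized path consumes four int8 values per 32-bit word. A sketch of the equivalent check and the element-wise op in NumPy (assuming a zero point of 0, which makes quantized ReLU a plain max with zero):

```python
import numpy as np

def quantized_relu_qint8(flat_input):
  if flat_input.size % 4 != 0:
    raise ValueError(
        f"Tensor size must be a multiple of 4, got {flat_input.size}")
  # With a signed zero point of 0, quantized ReLU is just max(x, 0).
  return np.maximum(flat_input, np.int8(0))
```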
/external/tensorflow/tensorflow/python/ops/ragged/
ragged_string_ops.py 368 flat_input = array_ops.reshape(input.flat_values, [-1])
370 flat_input = array_ops.reshape(input, [-1])
377 input=flat_input,
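ragged_string_ops.py flattens the input differently depending on whether it is ragged: a RaggedTensor's dense values live in flat_values, while a plain tensor can be reshaped directly. A small sketch of that branch (the wrapper function name is invented for illustration):

```python
import tensorflow as tf

def flatten_for_string_op(input):
  if isinstance(input, tf.RaggedTensor):
    # Only the dense inner values of a ragged tensor need reshaping.
    flat_input = tf.reshape(input.flat_values, [-1])
  else:
    flat_input = tf.reshape(input, [-1])
  return flat_input

print(flatten_for_string_op(tf.ragged.constant([["a", "b"], ["c"]])))
print(flatten_for_string_op(tf.constant([["x", "y"]])))
```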