Searched refs:flat_input (Results 1 – 9 of 9) sorted by relevance
/external/tensorflow/tensorflow/lite/experimental/examples/lstm/rnn.py
  213  flat_input = nest.flatten(inputs)
  217  flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
  218  flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
  231  batch_size = _best_effort_input_batch_size(flat_input)
  260  inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
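
Both rnn.py hits (this one and the copy under python/ops below) show the same input canonicalization in dynamic_rnn: flatten the possibly nested input structure, convert every piece to a tensor, and swap the batch and time axes so the recurrence runs time-major. A minimal sketch of that pattern with the public TF API; the helper name transpose_batch_time and the example input are mine, standing in for the private _transpose_batch_time and whatever the caller passes:

```python
import tensorflow as tf

def transpose_batch_time(x):
    # [batch, time, ...] -> [time, batch, ...]; leave any trailing axes alone.
    perm = tf.concat([[1, 0], tf.range(2, tf.rank(x))], axis=0)
    return tf.transpose(x, perm)

# Mirrors lines 213-218: flatten -> convert -> make time-major.
inputs = {"tokens": tf.zeros([8, 20, 32])}   # hypothetical nested input (batch=8, time=20)
flat_input = tf.nest.flatten(inputs)
flat_input = [tf.convert_to_tensor(t) for t in flat_input]
flat_input = tuple(transpose_batch_time(t) for t in flat_input)
```
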
/external/tensorflow/tensorflow/python/ops/rnn.py
   70  def _best_effort_input_batch_size(flat_input):  argument
   83  for input_ in flat_input:
   94  return array_ops.shape(flat_input[0])[1]
  635  flat_input = nest.flatten(inputs)
  639  flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
  640  flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
  653  batch_size = _best_effort_input_batch_size(flat_input)
  682  inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
  749  flat_input = nest.flatten(inputs)
  753  input_shape = array_ops.shape(flat_input[0])
  [all …]
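
_best_effort_input_batch_size (line 70) prefers a statically known batch dimension and only falls back to the dynamic array_ops.shape(flat_input[0])[1]; the index is 1 because the inputs have already been made time-major. Once the recurrence has run, pack_sequence_as (line 682) restores the caller's nesting. A hedged sketch of the batch-size logic; best_effort_batch_size is my paraphrase, not the private helper itself:

```python
import tensorflow as tf

def best_effort_batch_size(flat_input):
    # Inputs are time-major here ([time, batch, ...]), so batch is axis 1.
    for t in flat_input:
        if t.shape.rank is not None and t.shape.rank >= 2 and t.shape[1] is not None:
            return t.shape[1]              # statically known batch size
    return tf.shape(flat_input[0])[1]      # dynamic fallback, as at line 94

# Restoring the original structure afterwards (line 682):
# outputs = tf.nest.pack_sequence_as(structure=inputs, flat_sequence=flat_output)
```
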
/external/tensorflow/tensorflow/compiler/tf2xla/fused_batchnorm_reserve_space_test.cc
  127  auto flat_input = input_data.flat<float>();  in TEST() local
  128  for (int i = 0; i < flat_input.size(); i++) {  in TEST()
  129  flat_input.data()[i] = (i - 5) / 1000.0f;  in TEST()
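
The XLA test only uses the flat view to fill input_data with small deterministic values around zero. For reference, the same fill in NumPy; the shape here is illustrative, the test's real shape is not part of this excerpt:

```python
import numpy as np

shape = (2, 3, 4, 5)                                          # illustrative only
n = int(np.prod(shape))
flat_input = (np.arange(n, dtype=np.float32) - 5) / 1000.0    # (i - 5) / 1000.0f per element
input_data = flat_input.reshape(shape)
```
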
/external/tensorflow/tensorflow/core/kernels/relu_op.cc
  196  auto flat_input = input.flat<qint8>();  in Operate() local
  197  OP_REQUIRES(context, (flat_input.size() % 4) == 0,  in Operate()
  200  flat_input.size()));  in Operate()
  202  func(context->eigen_device<Device>(), flat_input, output->flat<qint8>());  in Operate()
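
The qint8 ReLU path rejects tensors whose flat size is not a multiple of 4, presumably so the kernel can treat four 8-bit values as one 32-bit word. A NumPy illustration of that packing assumption, not TensorFlow's actual kernel:

```python
import numpy as np

x = np.array([-3, 7, -1, 0, 5, -9, 2, 4], dtype=np.int8)   # pretend qint8 activations
assert x.size % 4 == 0, "qint8 ReLU expects a multiple of 4 elements"

packed = x.view(np.int32)    # four int8 lanes per 32-bit word; needs size % 4 == 0
relu = np.maximum(x, 0)      # elementwise ReLU, assuming a zero point of 0
```
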
/external/tensorflow/tensorflow/core/kernels/quantization_utils.h
  741  auto flat_input = input.flat<float>();  in FloatTensorToQuantizedInPlaceUsingEigen() local
  743  DCHECK_EQ(flat_input.size(), flat_result.size());  in FloatTensorToQuantizedInPlaceUsingEigen()
  746  flat_result.device(device) = QUANTIZE_WITH_EIGEN(flat_input, f2q, T);  in FloatTensorToQuantizedInPlaceUsingEigen()
  753  auto flat_input = input.flat<float>();  in FloatTensorToQuantizedInPlace() local
  755  const int data_size = flat_input.size();  in FloatTensorToQuantizedInPlace()
  758  flat_result(i) = FloatToQuantized<T>(flat_input(i), min, max);  in FloatTensorToQuantizedInPlace()
  775  auto flat_input = input.flat<T>();  in QuantizedTensorToFloatInPlaceUsingEigen() local
  777  const int data_size = flat_input.size();  in QuantizedTensorToFloatInPlaceUsingEigen()
  781  flat_result.device(device) = DEQUANTIZE_WITH_EIGEN(flat_input, q2f);  in QuantizedTensorToFloatInPlaceUsingEigen()
  789  auto flat_input = input.flat<T>();  in QuantizedTensorToFloatInPlace() local
  [all …]
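
These helpers convert between float and quantized tensors over their flat views, either as one Eigen expression or as a scalar loop around FloatToQuantized<T>. The sketch below shows only the generic min/max affine mapping the names suggest, for uint8; it approximates, and is not copied from, quantization_utils.h:

```python
import numpy as np

def float_to_quantized_uint8(x, range_min, range_max):
    # Map [range_min, range_max] onto [0, 255]; roughly what the scalar loop does per element.
    scale = (range_max - range_min) / 255.0
    return np.clip(np.round((x - range_min) / scale), 0, 255).astype(np.uint8)

def quantized_uint8_to_float(q, range_min, range_max):
    scale = (range_max - range_min) / 255.0
    return range_min + q.astype(np.float32) * scale

x = np.linspace(-1.0, 1.0, 8, dtype=np.float32)
q = float_to_quantized_uint8(x, -1.0, 1.0)
x_back = quantized_uint8_to_float(q, -1.0, 1.0)   # round-trips to within one quantization step
```
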
/external/tensorflow/tensorflow/core/kernels/unsorted_segment_join_op.cc
  120  auto flat_input = input.flat<tstring>();  in Compute() local
  143  output_flat(output_index).append(flat_input(offset));  in Compute()
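
The string kernel appends each input string into the output slot chosen by its segment id, i.e. the usual unsorted-segment pattern applied to string joins. A plain-Python sketch of those semantics (rank/shape checks and the op's exact attribute handling omitted):

```python
def unsorted_segment_join(inputs, segment_ids, num_segments, separator=""):
    # Mirrors output_flat(output_index).append(flat_input(offset)) per element.
    buckets = [[] for _ in range(num_segments)]
    for s, seg in zip(inputs, segment_ids):
        buckets[seg].append(s)
    return [separator.join(b) for b in buckets]

print(unsorted_segment_join(["a", "b", "c", "d"], [1, 0, 1, 0], 2, separator=","))
# ['b,d', 'a,c']
```
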
/external/tensorflow/tensorflow/compiler/mlir/tools/kernel_gen/tests/print_memrefs.mlir
   18  %flat_input = memref_reshape %input(%num_elem)
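
The MLIR test flattens the input buffer to a rank-1 memref of %num_elem elements before printing it. The TensorFlow-level analogue is just a reshape to 1-D; the tensor below is an arbitrary stand-in:

```python
import tensorflow as tf

x = tf.random.uniform([4, 5, 6])    # arbitrary shape
num_elem = tf.size(x)               # plays the role of %num_elem
flat_input = tf.reshape(x, [-1])    # rank-1 view with num_elem elements
```
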
/external/tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py
  433  flat_input = array_ops.reshape(input.flat_values, [-1])
  435  flat_input = array_ops.reshape(input, [-1])
  442  input=flat_input,
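
Lines 433–435 are the usual ragged/dense split: a RaggedTensor is flattened through its flat_values, a dense tensor is reshaped directly, and the underlying string op then runs on the 1-D flat_input. A sketch of that dispatch using the public API; flatten_string_input is a hypothetical name for the inlined logic:

```python
import tensorflow as tf

def flatten_string_input(x):
    # RaggedTensor: use the underlying flat_values; dense Tensor: reshape directly.
    if isinstance(x, tf.RaggedTensor):
        return tf.reshape(x.flat_values, [-1])
    return tf.reshape(x, [-1])

print(flatten_string_input(tf.ragged.constant([["a", "b"], ["c"]])))   # [b'a' b'b' b'c']
print(flatten_string_input(tf.constant([["a", "b"], ["c", "d"]])))     # [b'a' b'b' b'c' b'd']
```
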
/external/tensorflow/tensorflow/core/kernels/mkl/mkl_quantize_op.cc
  377  auto flat_input = input.flat<float>().data();  in Compute() local
  391  minfirst_input[i] = flat_input[i] - min_range;  in Compute()
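
Line 391 is the MIN_FIRST flavour of quantization: shift the input by min_range so it starts at zero, then scale into the quantized range. A NumPy sketch of that shift-then-scale idea; it approximates the mode, not the MKL kernel's exact arithmetic:

```python
import numpy as np

def quantize_min_first_uint8(x, min_range, max_range):
    minfirst_input = x - min_range                    # mirrors flat_input[i] - min_range
    scale = 255.0 / (max_range - min_range)
    return np.clip(np.round(minfirst_input * scale), 0, 255).astype(np.uint8)

x = np.array([-1.0, -0.5, 0.0, 0.5, 1.0], dtype=np.float32)
q = quantize_min_first_uint8(x, min_range=-1.0, max_range=1.0)   # [0, 64, 128, 191, 255]
```
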