/external/tensorflow/tensorflow/contrib/tensor_forest/python/kernel_tests/ |
D | scatter_add_ndim_op_test.py |
      30  input_data = variables.Variable(
      37  tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
      40  input_data.eval())
      43  input_data = variables.Variable([[[1., 2., 3.], [4., 5., 6.]],
      50  tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
      52  [[7., 8., 9.], [10., 11., 212.]]], input_data.eval())
      56  input_data = variables.Variable(init_val)
      62  tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
      63  self.assertAllEqual(init_val, input_data.eval())
      67  input_data = variables.Variable(init_val)
      [all …]
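
The matches above all drive the same contrib op; a minimal sketch of that call pattern follows, assuming the TF 1.x contrib import path and graph-mode sessions. The indices/updates values are illustrative, not taken from the test.

    import tensorflow as tf
    from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops

    # A variable updated in place, mirroring the test's variables.Variable(...).
    input_data = tf.Variable([[1., 2., 3.], [4., 5., 6.]])
    indices = [[0, 0], [1, 2]]   # positions to update (fully specified here)
    updates = [10., 20.]         # amounts added at those positions

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # scatter_add_ndim returns an op; .run() uses the default session,
        # exactly as the test above does.
        tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
        print(sess.run(input_data))
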
|
/external/protobuf/src/google/protobuf/util/ |
D | json_util_test.cc |
     242  string input_data = "0123456789";  in TEST() local
     243  for (int input_pattern = 0; input_pattern < (1 << (input_data.size() - 1));  in TEST()
     250  for (int j = 0; j < input_data.length() - 1; ++j) {  in TEST()
     252  byte_sink.Append(&input_data[start], j - start + 1);  in TEST()
     256  byte_sink.Append(&input_data[start], input_data.length() - start);  in TEST()
     258  EXPECT_EQ(input_data, string(buffer, input_data.length()));  in TEST()
     262  input_data = "012345678";  in TEST()
     263  for (int input_pattern = 0; input_pattern < (1 << (input_data.size() - 1));  in TEST()
     270  for (int j = 0; j < input_data.length() - 1; ++j) {  in TEST()
     272  byte_sink.Append(&input_data[start], j - start + 1);  in TEST()
     [all …]
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | convolution_variants_test.cc |
     383  std::vector<float> input_data(64);  in XLA_TEST_F() local
     384  std::iota(input_data.begin(), input_data.end(), 0.0);  in XLA_TEST_F()
     385  Array4D<float> input_array(1, 1, 8, 8, input_data);  in XLA_TEST_F()
     403  std::vector<float> input_data(16 * 1 * 1 * 1);  in XLA_TEST_F() local
     404  std::iota(input_data.begin(), input_data.end(), 1.0);  in XLA_TEST_F()
     405  Array4D<float> input_array(16, 1, 1, 1, input_data);  in XLA_TEST_F()
     515  std::vector<float> input_data(2 * 8 * 8);  in XLA_TEST_F() local
     516  std::iota(input_data.begin(), input_data.end(), 0.0);  in XLA_TEST_F()
     517  Array4D<float> input_array(1, 2, 8, 8, input_data);  in XLA_TEST_F()
     541  std::vector<float> input_data(2 * 2 * 8 * 8);  in XLA_TEST_F() local
     [all …]
|
D | reduce_test.cc |
      91  std::vector<float> input_data(element_count);  in RunR1ToR0Test() local
      93  input_data[i] = rand_r(&seed_) % 3;  in RunR1ToR0Test()
      95  input_data[i] *= -1;  in RunR1ToR0Test()
      99  Literal::CreateR1(AsSlice(input_data));  in RunR1ToR0Test()
     104  for (float item : input_data) {  in RunR1ToR0Test()
     112  tensorflow::gtl::ArraySlice<int> input_data) {  in RunR1ToR0PredTest() argument
     113  const int element_count = input_data.size();  in RunR1ToR0PredTest()
     131  std::unique_ptr<Literal> input_literal = Literal::CreateR1(input_data);  in RunR1ToR0PredTest()
     136  for (bool item : input_data) {  in RunR1ToR0PredTest()
     170  Array2D<uint8> input_data(rows, cols);  in RunR2ToR1PredTest() local
     [all …]
|
/external/tensorflow/tensorflow/examples/speech_commands/ |
D | input_data_test.py |
      27  from tensorflow.examples.speech_commands import input_data
      66  len(input_data.prepare_words_list(words_list)), len(words_list))
      70  input_data.which_set("foo.wav", 10, 10),
      71  input_data.which_set("foo.wav", 10, 10))
      73  input_data.which_set("foo_nohash_0.wav", 10, 10),
      74  input_data.which_set("foo_nohash_1.wav", 10, 10))
      79  audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"],
      85  self.assertEquals(input_data.UNKNOWN_WORD_INDEX,
      92  _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
     100  _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
     [all …]
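
The helpers exercised here come from the speech_commands example. A short sketch of the deterministic split behaviour the test checks: which_set keys off the file name (ignoring any _nohash_ suffix), so augmented copies of one recording always land in the same partition.

    from tensorflow.examples.speech_commands import input_data

    # prepare_words_list() prepends the silence and unknown labels to the
    # wanted words, which is why the test compares list lengths.
    print(input_data.prepare_words_list(["a", "b"]))

    validation_percentage = 10
    testing_percentage = 10
    for name in ["foo.wav", "foo_nohash_0.wav", "foo_nohash_1.wav"]:
        # Returns 'training', 'validation', or 'testing'; the same name
        # always maps to the same partition.
        print(name, input_data.which_set(name, validation_percentage,
                                         testing_percentage))
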
|
/external/tensorflow/tensorflow/python/keras/_impl/keras/ |
D | testing_utils.py |
      55  input_data=None, expected_output=None,  argument
      73  if input_data is None:
      81  input_data = 10 * np.random.random(input_data_shape)
      83  input_data -= 0.5
      84  input_data = input_data.astype(input_dtype)
      86  input_shape = input_data.shape
      88  input_dtype = input_data.dtype
     121  actual_output = model.predict(input_data)
     144  output = recovered_model.predict(input_data)
     149  model.train_on_batch(input_data, actual_output)
     [all …]
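
For context, a hedged sketch of how this helper is typically called from a layer unit test. The internal module path matches the index entry above but is version-specific, and the Dense layer and kwargs are placeholders.

    import numpy as np
    from tensorflow.python.keras._impl import keras
    from tensorflow.python.keras._impl.keras import testing_utils

    # With input_data=None the helper synthesizes data of input_shape, as the
    # snippet shows (10 * np.random.random(shape), shifted by -0.5 and cast).
    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(2, 4))

    # Passing explicit input_data instead makes the predict / save-reload /
    # train_on_batch round trip run on known values.
    testing_utils.layer_test(
        keras.layers.Dense,
        kwargs={'units': 3},
        input_data=np.arange(8, dtype='float32').reshape(2, 4))
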
|
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/ |
D | reinterpret_string_to_float_op.cc |
      38  void Evaluate(const Tensor& input_data, Tensor output_data, int32 start,  in Evaluate() argument
      41  const auto in_data = input_data.unaligned_flat<string>();  in Evaluate()
      54  const Tensor& input_data = context->input(0);  in Compute() local
      57  if (!CheckTensorBounds(context, input_data)) return;  in Compute()
      61  context, context->allocate_output(0, input_data.shape(), &output_data));  in Compute()
      64  const int32 num_data = static_cast<int32>(input_data.NumElements());  in Compute()
      68  Evaluate(input_data, *output_data, 0, num_data);  in Compute()
      70  auto work = [&input_data, output_data, num_data](int64 start, int64 end) {  in Compute()
      73  Evaluate(input_data, *output_data, static_cast<int32>(start),  in Compute()
|
/external/libjpeg-turbo/ |
D | jcsample.c |
      62  JSAMPARRAY input_data,
     148  JSAMPARRAY input_data, JSAMPARRAY output_data)  in int_downsample() argument
     165  expand_right_edge(input_data, cinfo->max_v_samp_factor,  in int_downsample()
     175  inptr = input_data[inrow+v] + outcol_h;  in int_downsample()
     195  JSAMPARRAY input_data, JSAMPARRAY output_data)  in fullsize_downsample() argument
     198  jcopy_sample_rows(input_data, 0, output_data, 0,  in fullsize_downsample()
     220  JSAMPARRAY input_data, JSAMPARRAY output_data)  in h2v1_downsample() argument
     232  expand_right_edge(input_data, cinfo->max_v_samp_factor,  in h2v1_downsample()
     237  inptr = input_data[outrow];  in h2v1_downsample()
     257  JSAMPARRAY input_data, JSAMPARRAY output_data)  in h2v2_downsample() argument
     [all …]
|
D | jdsample.c |
     128  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)  in fullsize_upsample() argument
     130  *output_data_ptr = input_data;  in fullsize_upsample()
     141  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)  in noop_upsample() argument
     160  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)  in int_upsample() argument
     177  inptr = input_data[inrow];  in int_upsample()
     204  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)  in h2v1_upsample() argument
     213  inptr = input_data[inrow];  in h2v1_upsample()
     232  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)  in h2v2_upsample() argument
     242  inptr = input_data[inrow];  in h2v2_upsample()
     275  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)  in h2v1_fancy_upsample() argument
     [all …]
|
D | jsimd.h |
      43  JSAMPARRAY input_data, JSAMPARRAY output_data);
      49  JSAMPARRAY input_data, JSAMPARRAY output_data);
      53  JSAMPARRAY input_data, JSAMPARRAY output_data);
      61  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
      64  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
      67  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
      74  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
      77  JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
|
/external/tensorflow/tensorflow/core/kernels/ |
D | colorspace_op.h |
      30  typename TTypes<T, 2>::ConstTensor input_data,  in operator()
      37  auto R = input_data.template chip<1>(0);  in operator()
      38  auto G = input_data.template chip<1>(1);  in operator()
      39  auto B = input_data.template chip<1>(2);  in operator()
      47  V.device(d) = input_data.maximum(channel_axis);  in operator()
      49  range.device(d) = V - input_data.minimum(channel_axis);  in operator()
      68  typename TTypes<T, 2>::ConstTensor input_data,  in operator()
      70  auto H = input_data.template chip<1>(0);  in operator()
      71  auto S = input_data.template chip<1>(1);  in operator()
      72  auto V = input_data.template chip<1>(2);  in operator()
|
D | topk_op.cc |
     139  const T* input_data = &input(b, 0);  in Compute() local
     140  const auto stable_comp = [input_data](const int32 a, const int32 b) {  in Compute()
     141  if (input_data[b] < input_data[a]) {  in Compute()
     143  } else if (input_data[b] > input_data[a]) {  in Compute()
     149  const auto comp = [input_data](const int32 a, const int32 b) {  in Compute()
     150  return input_data[b] < input_data[a];  in Compute()
     171  if (input_data[*run_begin] == input_data[*run_end]) {  in Compute()
     173  if (input_data[*run_begin] != input_data[*run_end]) break;  in Compute()
|
/external/tensorflow/tensorflow/examples/tutorials/mnist/ |
D | BUILD |
      18  ":input_data",
      24  name = "input_data",
      25  srcs = ["input_data.py"],
      55  ":input_data",
      68  ":input_data",
      80  ":input_data",
      92  ":input_data",
     110  ":input_data",
     131  ":input_data",
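
The :input_data library that the tutorial targets above depend on wraps the classic MNIST download helper; the usual call from those tutorials looks like this (TF 1.x API, download directory is arbitrary).

    from tensorflow.examples.tutorials.mnist import input_data

    # Downloads the MNIST files if needed and returns train/validation/test
    # DataSets with flattened 28*28 images.
    mnist = input_data.read_data_sets("/tmp/mnist_data", one_hot=True)

    batch_xs, batch_ys = mnist.train.next_batch(100)
    print(batch_xs.shape, batch_ys.shape)  # (100, 784) and (100, 10)
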
|
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/reference/ |
D | reference_ops.h |
     157  inline void Conv(const float* input_data, const Dims<4>& input_dims,  in Conv() argument
     195  float input_value = input_data[Offset(input_dims, in_channel,  in Conv()
     221  void Conv(const float* input_data, const Dims<4>& input_dims,  in Conv() argument
     229  Conv(input_data, input_dims, filter_data, filter_dims, bias_data, bias_dims,  in Conv()
     237  void Conv(const float* input_data, const Dims<4>& input_dims,  in Conv() argument
     243  Conv<Ac>(input_data, input_dims, filter_data, filter_dims, bias_data,  in Conv()
     248  inline void Conv(const uint8* input_data, const Dims<4>& input_dims,  in Conv() argument
     289  int32 input_val = input_data[Offset(input_dims, in_channel,  in Conv()
     318  inline void Conv(const uint8* input_data, const Dims<4>& input_dims,  in Conv() argument
     338  Conv(input_data, input_dims, input_offset, filter_data, filter_dims,  in Conv()
     [all …]
|
/external/tensorflow/tensorflow/contrib/tensor_forest/python/ |
D | tensor_forest_test.py |
      64  input_data = [[-1., 0.], [-1., 2.],  # node 1
      76  graph = graph_builder.training_graph(input_data, input_labels)
      80  input_data = [[-1., 0.], [-1., 2.],  # node 1
      93  graph = graph_builder.training_graph(input_data, input_labels)
      97  input_data = [[-1., 0.], [-1., 2.],  # node 1
     108  probs, paths, var = graph_builder.inference_graph(input_data)
     114  input_data = sparse_tensor.SparseTensor(
     128  graph = graph_builder.training_graph(input_data, input_labels)
     132  input_data = sparse_tensor.SparseTensor(
     152  probs, paths, var = graph_builder.inference_graph(input_data)
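
A hedged sketch of the graph-builder calls these matches exercise: the ForestHParams fields below are assumptions based on the TF 1.x contrib API, while training_graph/inference_graph mirror the lines shown above.

    from tensorflow.contrib.tensor_forest.python import tensor_forest

    params = tensor_forest.ForestHParams(
        num_classes=2, num_features=2, num_trees=10, max_nodes=100).fill()
    graph_builder = tensor_forest.RandomForestGraphs(params)

    input_data = [[-1., 0.], [-1., 2.],   # node 1
                  [1., 0.], [1., -2.]]    # node 2
    input_labels = [0, 1, 0, 1]

    # Build the training op and the inference outputs, matching the test above.
    train_op = graph_builder.training_graph(input_data, input_labels)
    probs, paths, var = graph_builder.inference_graph(input_data)
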
|
/external/tensorflow/tensorflow/stream_executor/cuda/ |
D | cuda_dnn.h |
      67  const DeviceMemory<Eigen::half>& input_data,
      84  const DeviceMemory<float>& input_data,
     101  const DeviceMemory<double>& input_data,
     118  const DeviceMemory<Eigen::half>& input_data,
     142  const DeviceMemory<float>& input_data,
     166  const DeviceMemory<double>& input_data,
     246  const DeviceMemory<float>& input_data,
     257  const DeviceMemory<double>& input_data,
     265  const DeviceMemory<Eigen::half>& input_data,
     337  const DeviceMemory<float>& input_data,  in DoConvolveQuantized() argument
     [all …]
|
/external/autotest/tko/parsers/ |
D | version_0_unittest.py |
     205  input_data = ("\t\t\tGOOD\t----\t----\t"
     207  line = version_0.status_line.parse_line(input_data)
     218  input_data = ("\t\tGOOD\t----\t----\t"
     221  line = version_0.status_line.parse_line(input_data +
     235  input_data = ("\tEND FAIL\t----\ttest\tfield1=val1\tStatus\nwith\n"
     238  line = version_0.status_line.parse_line(input_data)
     249  input_data = " GOOD\trandom\tfields\tof text"
     250  line = version_0.status_line.parse_line(input_data)
     252  line = version_0.status_line.parse_line(input_data.lstrip())
     263  input_data = "\t\tGOOD\tfield\tsecond field"
     [all …]
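
A hedged sketch of the parser entry point the test drives. The import path and the meaning of the tab-separated fields are assumptions mirroring the test strings above; nothing is assumed about the attributes of the returned object.

    from autotest_lib.tko.parsers import version_0

    # A version-0 status line: indentation, status word, subdir, test name,
    # optional key=value fields, then a free-form message.
    input_data = ("\t\t\tGOOD\t----\t----\t"
                  "field1=value1\tTest message")
    line = version_0.status_line.parse_line(input_data)

    # Lines without the expected leading indentation/status structure are
    # handled differently, which is what the lstrip() comparison above probes.
    other = version_0.status_line.parse_line(" GOOD\trandom\tfields\tof text")
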
|
/external/tensorflow/tensorflow/contrib/tensor_forest/hybrid/core/ops/ |
D | hard_routing_function_op.cc |
      97  const Tensor& input_data = context->input(0);  in Compute() local
     101  if (input_data.shape().dim_size(0) > 0) {  in Compute()
     103  context, input_data.shape().dims() == 2,  in Compute()
     108  if (!CheckTensorBounds(context, input_data)) return;  in Compute()
     110  const int32 num_data = static_cast<int32>(input_data.shape().dim_size(0));  in Compute()
     112  static_cast<int32>(input_data.shape().dim_size(1));  in Compute()
     133  const auto data = input_data.tensor<float, 2>();  in Compute()
     139  const Tensor point = input_data.Slice(i, i + 1);  in Compute()
|
D | routing_function_op.cc |
      88  const Tensor& input_data = context->input(0);  in Compute() local
      92  if (input_data.shape().dim_size(0) > 0) {  in Compute()
      94  context, input_data.shape().dims() == 2,  in Compute()
      99  if (!CheckTensorBounds(context, input_data)) return;  in Compute()
     101  const int32 num_data = static_cast<int32>(input_data.shape().dim_size(0));  in Compute()
     103  static_cast<int32>(input_data.shape().dim_size(1));  in Compute()
     118  const Tensor point = input_data.Slice(i, i + 1);  in Compute()
|
D | k_feature_routing_function_op.cc |
     103  const Tensor& input_data = context->input(0);  in Compute() local
     107  if (input_data.shape().dim_size(0) > 0) {  in Compute()
     109  context, input_data.shape().dims() == 2,  in Compute()
     114  if (!CheckTensorBounds(context, input_data)) return;  in Compute()
     116  const int32 num_data = static_cast<int32>(input_data.shape().dim_size(0));  in Compute()
     118  static_cast<int32>(input_data.shape().dim_size(1));  in Compute()
     134  const Tensor point = input_data.Slice(i, i + 1);  in Compute()
|
/external/libjpeg-turbo/simd/ |
D | jcsample-mmx.asm |
      31  ; JSAMPARRAY input_data, JSAMPARRAY output_data);
      38  %define input_data(b) (b)+24 ; JSAMPARRAY input_data
      71  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     105  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     148  add esi, byte SIZEOF_JSAMPROW ; input_data
     173  ; JSAMPARRAY input_data, JSAMPARRAY output_data);
     180  %define input_data(b) (b)+24 ; JSAMPARRAY input_data
     213  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     247  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     305  add esi, byte 2*SIZEOF_JSAMPROW ; input_data
|
D | jcsample-sse2.asm |
      31  ; JSAMPARRAY input_data, JSAMPARRAY output_data);
      38  %define input_data(b) (b)+24 ; JSAMPARRAY input_data
      71  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     105  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     163  add esi, byte SIZEOF_JSAMPROW ; input_data
     186  ; JSAMPARRAY input_data, JSAMPARRAY output_data);
     193  %define input_data(b) (b)+24 ; JSAMPARRAY input_data
     226  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     260  mov esi, JSAMPARRAY [input_data(ebp)] ; input_data
     334  add esi, byte 2*SIZEOF_JSAMPROW ; input_data
|
/external/tensorflow/tensorflow/stream_executor/ |
D | dnn.h |
    1157  const DeviceMemory<float>& input_data,
    1170  const DeviceMemory<double>& input_data,
    1181  const DeviceMemory<Eigen::half>& input_data,
    1203  const DeviceMemory<float>& input_data,
    1214  const DeviceMemory<float>& input_data,
    1232  const DeviceMemory<float>& input_data,
    1309  const DeviceMemory<float>& input_data,
    1327  const DeviceMemory<Eigen::half>& input_data,
    1352  const DeviceMemory<float>& input_data,
    1360  const DeviceMemory<double>& input_data,
    [all …]
|
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/optimized/ |
D | optimized_ops.h |
     327  inline void FullyConnected(const float* input_data, const Dims<4>& input_dims,  in FullyConnected() argument
     345  MapAsMatrixWithGivenNumberOfRows(input_data, input_dims, input_rows);  in FullyConnected()
     359  void FullyConnected(const float* input_data, const Dims<4>& input_dims,  in FullyConnected() argument
     365  FullyConnected(input_data, input_dims, weights_data, weights_dims, bias_data,  in FullyConnected()
     380  const uint8* input_data, const Dims<4>& input_dims, int32 input_offset,  in FullyConnectedAsGEMV() argument
     398  preload_l1_stream(input_data + k);  in FullyConnectedAsGEMV()
     415  const uint8x16_t input_val_u8 = vld1q_u8(input_data + in);  in FullyConnectedAsGEMV()
     450  const uint8x8_t input_val_u8 = vld1_u8(input_data + in);  in FullyConnectedAsGEMV()
     480  const int32 input_val = input_data[in] + input_offset;  in FullyConnectedAsGEMV()
     560  inline void FullyConnected(const uint8* input_data, const Dims<4>& input_dims,  in FullyConnected() argument
     [all …]
|
/external/tensorflow/tensorflow/tools/graph_transforms/ |
D | fold_batch_norms_test.cc |
      43  Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));  in TestFoldBatchNormsConv2D() local
      45  &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,  in TestFoldBatchNormsConv2D()
      48  Const(root.WithOpName("input_op"), Input::Initializer(input_data));  in TestFoldBatchNormsConv2D()
      94  Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));  in TestFoldBatchNormsConv2DShared() local
      96  &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,  in TestFoldBatchNormsConv2DShared()
      99  Const(root.WithOpName("input_op"), Input::Initializer(input_data));  in TestFoldBatchNormsConv2DShared()
     152  Tensor input_data(DT_FLOAT, TensorShape({6, 2}));  in TestFoldBatchNormsMatMul() local
     154  &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,  in TestFoldBatchNormsMatMul()
     157  Const(root.WithOpName("input_op"), Input::Initializer(input_data));  in TestFoldBatchNormsMatMul()
|