/external/tensorflow/tensorflow/contrib/slim/python/slim/nets/ |
D | inception_v3_test.py |
    39 batch_size = 5
    43 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    47 [batch_size, num_classes])
    50 [batch_size, num_classes])
    53 batch_size = 5
    56 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    60 [batch_size, 8, 8, 2048])
    70 batch_size = 5
    81 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    89 batch_size = 5
    [all …]
|
D | inception_v2_test.py |
    39 batch_size = 5
    43 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    47 [batch_size, num_classes])
    50 [batch_size, num_classes])
    53 batch_size = 5
    56 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    60 [batch_size, 7, 7, 1024])
    69 batch_size = 5
    78 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    86 batch_size = 5
    [all …]
|
D | vgg_test.py |
    34 batch_size = 5
    38 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    42 [batch_size, num_classes])
    45 batch_size = 1
    49 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    53 [batch_size, 2, 2, num_classes])
    56 batch_size = 5
    61 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    73 batch_size = 5
    77 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    [all …]
|
D | inception_v1_test.py |
    39 batch_size = 5
    43 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    47 [batch_size, num_classes])
    50 [batch_size, num_classes])
    53 batch_size = 5
    56 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    60 [batch_size, 7, 7, 1024])
    70 batch_size = 5
    80 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    88 batch_size = 5
    [all …]
|
D | overfeat_test.py |
    33 batch_size = 5
    37 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    41 [batch_size, num_classes])
    44 batch_size = 1
    48 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    52 [batch_size, 2, 2, num_classes])
    55 batch_size = 5
    59 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    70 batch_size = 5
    74 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    [all …]
|
D | alexnet_test.py |
    33 batch_size = 5
    37 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    41 [batch_size, num_classes])
    44 batch_size = 1
    48 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    52 [batch_size, 4, 7, num_classes])
    55 batch_size = 5
    59 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    70 batch_size = 5
    74 inputs = random_ops.random_uniform((batch_size, height, width, 3))
    [all …]
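The slim model tests listed above (inception_v1/v2/v3, vgg, overfeat, alexnet) all follow the same pattern: build a random NHWC batch with a leading batch_size dimension, run the network, and check that the logits keep that leading dimension. A minimal NumPy sketch of the shape convention only; the 224x224 size, 1000 classes, and the pooling/linear stand-in "network" are placeholders, not the slim models themselves:

```python
import numpy as np

# Placeholder dimensions mirroring the slim test files above.
batch_size, height, width, num_classes = 5, 224, 224, 1000

# Random NHWC input batch, analogous to
# random_ops.random_uniform((batch_size, height, width, 3)).
inputs = np.random.uniform(size=(batch_size, height, width, 3)).astype(np.float32)

# A stand-in "network": global average pool plus a random linear layer,
# just enough to show the expected output shape.
features = inputs.mean(axis=(1, 2))                       # [batch_size, 3]
weights = np.random.randn(3, num_classes).astype(np.float32)
logits = features @ weights                               # [batch_size, num_classes]

assert logits.shape == (batch_size, num_classes)
```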
|
/external/tensorflow/tensorflow/python/estimator/canned/ |
D | dnn_test.py |
    171 label_dimension, batch_size): argument
    194 self.assertAllEqual((batch_size, label_dimension), predictions.shape)
    207 batch_size = 10
    208 data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    209 data = data.reshape(batch_size, label_dimension)
    214 batch_size=batch_size,
    220 batch_size=batch_size,
    224 batch_size=batch_size,
    233 batch_size=batch_size)
    240 batch_size = 10
    [all …]
|
D | dnn_linear_combined_test.py |
    226 label_dimension, batch_size): argument
    253 self.assertAllEqual((batch_size, label_dimension), predictions.shape)
    266 batch_size = 10
    267 data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    268 data = data.reshape(batch_size, label_dimension)
    273 batch_size=batch_size,
    279 batch_size=batch_size,
    283 batch_size=batch_size,
    292 batch_size=batch_size)
    299 batch_size = 10
    [all …]
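Both canned-estimator tests prepare their regression data the same way: a flat np.linspace ramp reshaped to (batch_size, label_dimension), used as both features and labels, with the prediction shape later checked against those same two dimensions. A NumPy-only sketch of that data preparation (the estimator and input_fn wiring is omitted; the "predictions" here are just a stand-in for the shape check):

```python
import numpy as np

batch_size, label_dimension = 10, 1

# Same construction as the tests above: evenly spaced values reshaped
# into a [batch_size, label_dimension] matrix.
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)

# The tests assert only on shape: (batch_size, label_dimension).
predictions = data.copy()
assert predictions.shape == (batch_size, label_dimension)
```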
|
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/ |
D | basic_decoder_test.py |
    47 batch_size = 5
    54 inputs = np.random.randn(batch_size, max_time,
    69 dtype=dtypes.float32, batch_size=batch_size),
    85 batch_size_t = my_decoder.batch_size
    91 self.assertEqual((batch_size, expected_output_depth),
    93 self.assertEqual((batch_size,), step_outputs[1].get_shape())
    94 self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
    95 self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
    96 self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
    97 self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
    [all …]
|
D | beam_search_decoder_test.py |
    111 self.batch_size = 2
    118 dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    122 array_ops.ones([self.batch_size, self.beam_width])),
    124 2, shape=[self.batch_size, self.beam_width], dtype=dtypes.int64),
    126 [self.batch_size, self.beam_width], dtype=dtypes.bool))
    128 logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
    146 batch_size=ops.convert_to_tensor(self.batch_size),
    173 dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    177 array_ops.ones([self.batch_size, self.beam_width])),
    183 logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
    [all …]
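The beam-search test builds every per-step quantity with a leading [batch_size, beam_width] pair (cell state, accumulated log-probabilities, predicted ids, finished flags), plus [batch_size, beam_width, vocab_size] step logits. A NumPy sketch of those shapes and of normalizing the logits into per-beam log-probabilities; the concrete values are placeholders, not the test's:

```python
import numpy as np

batch_size, beam_width, vocab_size = 2, 3, 5

# Per-beam bookkeeping tensors, all shaped [batch_size, beam_width].
cell_state = np.zeros((batch_size, beam_width), dtype=np.float32)
log_probs = np.log(np.full((batch_size, beam_width), 1.0 / beam_width))
finished = np.zeros((batch_size, beam_width), dtype=bool)
assert cell_state.shape == finished.shape == (batch_size, beam_width)

# Step logits over the vocabulary, normalized along the vocab axis.
logits = np.full((batch_size, beam_width, vocab_size), 0.0001, dtype=np.float32)
log_softmax = logits - np.log(np.sum(np.exp(logits), axis=-1, keepdims=True))
assert log_softmax.shape == (batch_size, beam_width, vocab_size)
```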
|
/external/tensorflow/tensorflow/python/training/ |
D | input_test.py |
    444 batch_size = 10
    448 counter = examples.count_up_to(num_batches * batch_size)
    462 batch_size=batch_size)
    466 [counter, sparse_counter, "string"], batch_size=batch_size)
    475 np.arange(i * batch_size, (i + 1) * batch_size))
    479 np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
    480 [0, 1] * batch_size)).T)
    482 expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
    483 expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
    485 self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
    [all …]
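The sparse-counter check in this test rests on a small piece of index arithmetic visible in the snippet: each batch of size batch_size carries every counter value twice, with alternating signs, so the expected values are an integer ramp floor-divided by 2 and multiplied elementwise by [1, -1, 1, -1, ...]. The arithmetic in isolation, NumPy only:

```python
import numpy as np

batch_size = 10
i = 0  # index of the batch being checked

# Each counter value appears twice per batch (hence the // 2) ...
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
# ... with alternating signs on consecutive entries.
expected = expected * ([1, -1] * batch_size)

print(expected)  # [0  0  1 -1  2 -2 ...  9 -9] for i == 0
```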
|
/external/tensorflow/tensorflow/contrib/slim/python/slim/data/ |
D | prefetch_queue_test.py |
    41 batch_size = 10
    48 counter = examples.count_up_to(num_batches * batch_size)
    55 [counter, image, label], batch_size=batch_size, num_threads=1)
    65 np.arange(i * batch_size, (i + 1) * batch_size))
    67 (batch_size, image_size, image_size, 3))
    68 self.assertEquals(results[2].shape, (batch_size, 1))
    78 batch_size = 10
    85 counter = examples.count_up_to(num_batches * batch_size)
    92 [counter, image, label], batch_size=batch_size, num_threads=4)
    104 (batch_size, image_size, image_size, 3))
    [all …]
|
/external/tensorflow/tensorflow/core/util/ctc/ |
D | ctc_beam_search_test.cc |
    104 const int batch_size = 1; in TEST() local
    119 int sequence_lengths[batch_size] = {timesteps}; in TEST()
    120 float input_data_mat[timesteps][batch_size][num_classes] = { in TEST()
    129 for (int b = 0; b < batch_size; ++b) { in TEST()
    151 Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size); in TEST()
    155 inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes); in TEST()
    161 output.resize(batch_size); in TEST()
    163 float score[batch_size][top_paths] = {{0.0}}; in TEST()
    164 Eigen::Map<Eigen::MatrixXf> scores(&score[0][0], batch_size, top_paths); in TEST()
    174 output.resize(batch_size); in TEST()
    [all …]
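The C++ test drives the core CTCBeamSearchDecoder directly with time-major input of shape [timesteps][batch_size][num_classes] and collects top_paths scored hypotheses per batch element. The public Python op uses the same layout; the sketch below goes through tf.nn.ctc_beam_search_decoder rather than the C++ class under test, and the dimensions are placeholders:

```python
import numpy as np
import tensorflow as tf

timesteps, batch_size, num_classes = 5, 1, 6  # last class is the CTC blank
top_paths = 2

# Time-major logits, shape [max_time, batch_size, num_classes].
logits = np.random.randn(timesteps, batch_size, num_classes).astype(np.float32)
sequence_length = np.full([batch_size], timesteps, dtype=np.int32)

decoded, log_probs = tf.nn.ctc_beam_search_decoder(
    logits, sequence_length, beam_width=10, top_paths=top_paths)

# One SparseTensor per returned path; scores are [batch_size, top_paths].
assert len(decoded) == top_paths
assert log_probs.shape == (batch_size, top_paths)
```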
|
/external/tensorflow/tensorflow/contrib/eager/python/examples/resnet50/ |
D | resnet50_graph_test.py |
    35 def image_shape(batch_size): argument
    37 return [batch_size, 3, 224, 224]
    38 return [batch_size, 224, 224, 3]
    41 def random_batch(batch_size): argument
    42 images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
    45 low=0, high=num_classes, size=[batch_size]).astype(np.int32)
    46 one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
    47 one_hot[np.arange(batch_size), labels] = 1.
    54 batch_size = 64
    64 np_images, _ = random_batch(batch_size)
    [all …]
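The helpers in this benchmark simply build a random image batch in either NCHW or NHWC layout plus one-hot labels. A self-contained NumPy sketch of the same helpers; the explicit data_format argument and num_classes = 1000 are assumptions added here for a runnable example:

```python
import numpy as np

num_classes = 1000  # assumed ImageNet-style class count


def image_shape(batch_size, data_format='channels_last'):
  if data_format == 'channels_first':
    return [batch_size, 3, 224, 224]
  return [batch_size, 224, 224, 3]


def random_batch(batch_size):
  images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
  labels = np.random.randint(
      low=0, high=num_classes, size=[batch_size]).astype(np.int32)
  one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
  one_hot[np.arange(batch_size), labels] = 1.
  return images, one_hot


np_images, np_labels = random_batch(64)
assert np_images.shape == (64, 224, 224, 3)
assert np_labels.shape == (64, num_classes)
```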
|
/external/tensorflow/tensorflow/contrib/rnn/python/kernel_tests/ |
D | gru_ops_test.py |
    45 batch_size = 4
    57 feed[x] = np.random.randn(num_steps, batch_size, input_size)
    62 batch_size = 4
    70 x = array_ops.zeros([batch_size, input_size])
    71 h = array_ops.zeros([batch_size, cell_size])
    74 x_value = np.random.rand(batch_size, input_size)
    75 h_value = np.random.rand(batch_size, cell_size)
    95 batch_size = 2
    107 dtypes.float32, shape=(time_steps, batch_size, input_size))
    108 h = array_ops.zeros([batch_size, cell_size])
    [all …]
|
D | rnn_cell_test.py |
    53 batch_size = 3
    65 x = array_ops.zeros([batch_size, input_size])
    66 m = array_ops.zeros([batch_size, state_size])
    76 0.1 * np.ones((batch_size, state_size))
    87 batch_size = 3
    94 x = array_ops.zeros([batch_size, input_size])
    95 m = array_ops.zeros([batch_size, state_size * num_shifts])
    108 0.1 * np.ones((batch_size, int(state_size * (num_shifts))))
    113 self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts))
    114 self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts))
    [all …]
|
/external/tensorflow/tensorflow/contrib/learn/python/learn/learn_io/ |
D | graph_io.py |
    43 batch_size, argument
    96 batch_size=batch_size,
    110 batch_size, argument
    165 batch_size,
    179 batch_size, argument
    237 batch_size,
    330 batch_size, argument
    382 if (batch_size is None) or (
    383 (not isinstance(batch_size, ops.Tensor)) and
    384 (batch_size <= 0 or batch_size >= queue_capacity)):
    [all …]
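The last three hits show the validation graph_io.py applies before enqueueing: a static (non-Tensor) batch_size must be a positive integer strictly smaller than the queue capacity. A plain-Python sketch of that static check; the Tensor branch is dropped because it only matters inside a TensorFlow graph, and the error message text here is illustrative:

```python
def check_batch_size(batch_size, queue_capacity):
  """Mirrors the static part of the graph_io.py batch_size validation."""
  if batch_size is None or batch_size <= 0 or batch_size >= queue_capacity:
    raise ValueError(
        'Invalid batch_size %s: must be in (0, %d).' % (batch_size, queue_capacity))


check_batch_size(32, queue_capacity=128)    # fine
# check_batch_size(0, queue_capacity=128)   # would raise ValueError
```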
|
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/ |
D | debug.py |
    92 batch_size = None
    98 if batch_size is None:
    99 batch_size = first_dim
    101 size_checks.append(check_ops.assert_equal(batch_size, first_dim))
    104 logits = array_ops.zeros([batch_size, head.logits_dimension])
    201 def predict_classes(self, input_fn=None, batch_size=None): argument
    214 input_fn=input_fn, batch_size=batch_size, outputs=[key])
    219 batch_size=None): argument
    232 batch_size=batch_size,
    312 def predict_scores(self, input_fn=None, batch_size=None): argument
    [all …]
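The first few hits show how the debug estimator infers batch_size at model-construction time: it takes the first dimension of the first feature tensor, asserts every other feature agrees, then builds zero logits of shape [batch_size, logits_dimension]. A NumPy sketch of the same inference; the real code appends check_ops.assert_equal size checks on tensors rather than using a Python assert, and the feature names and logits_dimension here are placeholders:

```python
import numpy as np

logits_dimension = 3  # assumed; the real value comes from the estimator head

features = {
    'a': np.zeros((8, 4), dtype=np.float32),
    'b': np.zeros((8, 2), dtype=np.float32),
}

batch_size = None
for tensor in features.values():
  first_dim = tensor.shape[0]
  if batch_size is None:
    batch_size = first_dim
  else:
    # debug.py appends check_ops.assert_equal(batch_size, first_dim) here.
    assert batch_size == first_dim

logits = np.zeros((batch_size, logits_dimension), dtype=np.float32)
assert logits.shape == (8, logits_dimension)
```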
|
D | dynamic_rnn_estimator_test.py |
    357 batch_size = 11
    363 def get_shift_input_fn(batch_size, sequence_length, seed=None): argument
    367 [batch_size, sequence_length + 1],
    373 [batch_size, sequence_length])
    377 [batch_size, sequence_length])), 2)
    380 [batch_size, cell_size], seed=((i + 1) * seed))
    402 train_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=12321)
    403 eval_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=32123)
    411 self.assertListEqual(list(state_piece.shape), [batch_size, state_size])
    416 batch_size = 11
    [all …]
|
/external/tensorflow/tensorflow/contrib/training/python/training/ |
D | sampling_ops_test.py |
    47 batch_size = 16
    55 batch_size,
    63 batch_size,
    71 probs, batch_size, init_probs)
    76 array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)
    84 batch_size,
    94 batch_size,
    100 sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)
    109 batch_size,
    116 val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
    [all …]
|
/external/tensorflow/tensorflow/contrib/lite/kernels/ |
D | softmax_test.cc |
    80 const int batch_size = 2; in TEST() local
    88 SoftmaxOpModel m(batch_size, input_size, beta); in TEST()
    90 m.SetInput(0, input_buffer, input_buffer + input_size * batch_size); in TEST()
    94 std::unique_ptr<float[]> output_buffer(new float[input_size * batch_size]); in TEST()
    95 static tflite::Dims<4> input_dims = {{input_size, 1, 1, batch_size}, in TEST()
    102 output_buffer.get() + input_size * batch_size); in TEST()
    108 const int batch_size = 2; in TEST() local
    116 SoftmaxOpModel m(batch_size, input_size, beta); in TEST()
    118 m.SetInput(0, input_buffer, input_buffer + input_size * batch_size); in TEST()
    122 std::unique_ptr<float[]> output_buffer(new float[input_size * batch_size]); in TEST()
    [all …]
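The TFLite test compares the kernel against a float reference over a flat buffer laid out as batch_size rows of input_size values, with beta scaling the logits before normalization. A NumPy reference with the same semantics (row-wise softmax of beta * x); the dimensions are placeholders:

```python
import numpy as np

batch_size, input_size, beta = 2, 5, 1.0

# Input buffer viewed as batch_size rows of input_size values.
x = np.random.randn(batch_size, input_size).astype(np.float32)

# Row-wise softmax with the beta scaling used by the TFLite kernel.
z = beta * x
z = z - z.max(axis=1, keepdims=True)          # for numerical stability
probs = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)

assert probs.shape == (batch_size, input_size)
assert np.allclose(probs.sum(axis=1), 1.0)
```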
|
/external/tensorflow/tensorflow/contrib/rnn/kernels/ |
D | gru_ops.cc |
    52 const int64 batch_size = x_tensor->dim_size(0); in Compute() local
    59 OP_REQUIRES(ctx, h_prev_tensor->dim_size(0) == batch_size, in Compute()
    62 batch_size)); in Compute()
    111 ctx, ctx->allocate_output("r", TensorShape({batch_size, cell_size}), in Compute()
    116 ctx, ctx->allocate_output("u", TensorShape({batch_size, cell_size}), in Compute()
    121 ctx, ctx->allocate_output("c", TensorShape({batch_size, cell_size}), in Compute()
    127 TensorShape({batch_size, cell_size}), &h_tensor)); in Compute()
    133 TensorShape({batch_size, input_size + cell_size}), in Compute()
    139 TensorShape({batch_size, input_size + cell_size}), in Compute()
    145 TensorShape({batch_size, 2 * cell_size}), in Compute()
    [all …]
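The block-GRU kernel sizes every buffer from batch_size, taken from the first dimension of the input x: the gates r, u, the candidate c and the new state h are all [batch_size, cell_size], the concatenated [x, h_prev] buffers are [batch_size, input_size + cell_size], and the stacked r/u pre-activations are [batch_size, 2 * cell_size]. The same shape bookkeeping in NumPy terms, as a sketch rather than the kernel itself:

```python
import numpy as np

batch_size, input_size, cell_size = 4, 3, 2

x = np.zeros((batch_size, input_size), dtype=np.float32)
h_prev = np.zeros((batch_size, cell_size), dtype=np.float32)

# Gate activations and candidate state, each [batch_size, cell_size].
r = np.zeros((batch_size, cell_size), dtype=np.float32)
u = np.zeros((batch_size, cell_size), dtype=np.float32)
c = np.zeros((batch_size, cell_size), dtype=np.float32)

# The kernel concatenates x and h_prev before the gate matmul.
x_h_prev = np.concatenate([x, h_prev], axis=1)
assert x_h_prev.shape == (batch_size, input_size + cell_size)

# The first matmul produces the stacked r and u gates in one buffer.
r_u_bar = np.zeros((batch_size, 2 * cell_size), dtype=np.float32)
assert r_u_bar.shape == (batch_size, 2 * cell_size)
```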
|
/external/tensorflow/tensorflow/contrib/eager/python/examples/gan/ |
D | mnist_graph_test.py |
    41 def _create_graph(self, batch_size): argument
    43 images_data = np.random.randn(batch_size, 784).astype(np.float32)
    57 shape=[batch_size, NOISE_DIM])
    77 def _report(self, test_name, start, num_iters, batch_size): argument
    80 name = 'graph_%s_%s_batch_%d_%s' % (test_name, dev, batch_size,
    82 extras = {'examples_per_sec': batch_size / avg_time}
    87 for batch_size in [64, 128, 256]:
    96 ) = self._create_graph(batch_size)
    106 noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
    117 noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
    [all …]
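The benchmark reports throughput as batch_size / avg_time, with avg_time being the wall-clock time per iteration measured from a start timestamp. The reporting arithmetic on its own, with a sleep standing in for one training step:

```python
import time

batch_size, num_iters = 64, 10

start = time.time()
for _ in range(num_iters):
  time.sleep(0.01)  # stand-in for one training/inference step
avg_time = (time.time() - start) / num_iters

extras = {'examples_per_sec': batch_size / avg_time}
print(extras)
```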
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | rnn_test.py |
    77 def zero_state(self, batch_size, dtype): argument
    95 def zero_state(self, batch_size, dtype): argument
    277 batch_size = 512
    282 sequence_length = np.random.randint(0, max_time, size=batch_size)
    284 np.random.randn(batch_size, num_units).astype(np.float32)
    324 def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu): argument
    330 sequence_length = np.random.randint(0, max_time, size=batch_size)
    332 np.random.randn(batch_size, num_units).astype(np.float32)
    358 (batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
    386 def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units, argument
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | matrix_inverse_op.cc |
    149 const int64 batch_size = input_copy_reshaped.dimension(0); in ComputeAsync() local
    155 TensorShape{batch_size, n}, &pivots), in ComputeAsync()
    159 sizeof(Scalar*) * batch_size, "input_copy_ptr_array", in ComputeAsync()
    162 sizeof(Scalar*) * batch_size, "output_copy_ptr_array", in ComputeAsync()
    166 if (n < 32 || batch_size > n) { in ComputeAsync()
    176 for (int batch = 0; batch < batch_size; ++batch) { in ComputeAsync()
    184 solver->GetDeviceLapackInfo(batch_size, "MatInvBatched")); in ComputeAsync()
    189 batch_size), in ComputeAsync()
    196 solver->GetDeviceLapackInfo(batch_size, "GetrfBatched")); in ComputeAsync()
    200 &dev_info.back(), batch_size), in ComputeAsync()
    [all …]
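On the user-facing side, the batched inversion this kernel implements corresponds to inverting a stack of batch_size square matrices at once; NumPy's np.linalg.inv (and likewise tf.linalg.inv) broadcasts over the leading batch dimension in the same way. A quick NumPy check of that behaviour, not a reimplementation of the cuSolver path:

```python
import numpy as np

batch_size, n = 8, 4

# A well-conditioned random batch: adding n * I keeps the matrices invertible
# with overwhelming probability.
mats = np.random.randn(batch_size, n, n) + n * np.eye(n)
invs = np.linalg.inv(mats)              # inverts each matrix in the batch

assert invs.shape == (batch_size, n, n)
assert np.allclose(np.matmul(mats, invs), np.eye(n), atol=1e-6)
```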
|