
Searched refs:batches (Results 1 – 25 of 93) sorted by relevance

/external/tensorflow/tensorflow/contrib/slim/python/slim/data/
prefetch_queue_test.py
54 batches = input_lib.batch(
57 batches = prefetch_queue.prefetch_queue(batches).dequeue()
63 results = sess.run(batches)
72 sess.run(batches)
91 batches = input_lib.batch(
94 batches = prefetch_queue.prefetch_queue(batches).dequeue()
101 results = sess.run(batches)
112 sess.run(batches)
131 batches = input_lib.batch(
134 batcher = prefetch_queue.prefetch_queue(batches)
[all …]
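
The prefetch_queue_test.py matches above all exercise the same TF 1.x queue-based input pattern: batch the input tensors, stage the resulting batches in a prefetch queue, and dequeue them inside a session. A minimal sketch of that pattern, assuming the contrib.slim prefetch_queue module from the path above; the image/label tensors are invented stand-ins for the test fixtures:

    import tensorflow as tf
    from tensorflow.contrib.slim.python.slim.data import prefetch_queue

    # Placeholder inputs; the real tests build these from fixtures.
    image = tf.random_uniform([32, 32, 3])
    label = tf.constant(1, dtype=tf.int32)

    # Batch the tensors, then stage the batches in a prefetch queue.
    batches = tf.train.batch([image, label], batch_size=4, num_threads=2, capacity=8)
    batches = prefetch_queue.prefetch_queue(batches).dequeue()

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        results = sess.run(batches)  # one prefetched batch of (images, labels)
        coord.request_stop()
        coord.join(threads)
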
/external/tensorflow/tensorflow/python/kernel_tests/
record_input_test.py
102 batches = 2
114 batches=batches)
121 for _ in range(int(files * records_per_file / batches)):
123 self.assertTrue(len(op_list) is batches)
159 batches = 2
171 batches=batches)
178 for _ in range(int(files * records_per_file / batches)):
180 self.assertTrue(len(op_list) is batches)
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/
bucket_by_sequence_length_test.py
147 batches = []
150 batches.append(batch)
170 for batch in batches:
243 batches = []
246 batches.append(batch)
252 for batch in batches:
294 batches = []
297 batches.append(batch)
303 for batch in batches:
332 batches = []
[all …]
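
The bucket_by_sequence_length_test.py matches collect the emitted batches into a Python list and then inspect each batch. A rough equivalent of that collection loop using the public tf.data transformation (eager style; the toy elements and bucket parameters are invented for illustration):

    import tensorflow as tf

    # Variable-length toy elements; the real tests build these from fixtures.
    elements = [list(range(n)) for n in [3, 5, 4, 7, 2, 6]]
    dataset = tf.data.Dataset.from_generator(
        lambda: iter(elements), output_types=tf.int32, output_shapes=[None])

    # Bucket by length, padding each batch to its bucket's longest element.
    dataset = dataset.apply(
        tf.data.experimental.bucket_by_sequence_length(
            element_length_func=lambda x: tf.shape(x)[0],
            bucket_boundaries=[4, 6],
            bucket_batch_sizes=[2, 2, 2]))

    batches = []
    for batch in dataset:        # collect every padded batch
        batches.append(batch.numpy())

    for batch in batches:        # each batch is padded within its bucket
        print(batch.shape)
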
/external/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch_cache.c
208 cache->batches[batch->idx] = NULL; in fd_bc_invalidate_batch()
265 for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) { in fd_bc_alloc_batch()
266 batch = cache->batches[i]; in fd_bc_alloc_batch()
280 for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) { in fd_bc_alloc_batch()
281 if ((cache->batches[i] == ctx->batch) || in fd_bc_alloc_batch()
282 !cache->batches[i]->needs_flush) in fd_bc_alloc_batch()
284 if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno)) in fd_bc_alloc_batch()
285 fd_batch_reference_locked(&flush_batch, cache->batches[i]); in fd_bc_alloc_batch()
303 for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) { in fd_bc_alloc_batch()
304 struct fd_batch *other = cache->batches[i]; in fd_bc_alloc_batch()
[all …]
freedreno_batch_cache.h
50 struct fd_batch *batches[32]; member
60 for (uint32_t _m = (mask); _m && ((batch) = (cache)->batches[u_bit_scan(&_m)]); _m &= (mask))
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
fully_connected.h
44 const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); in FullyConnected() local
48 for (int b = 0; b < batches; ++b) { in FullyConnected()
90 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in FullyConnected() local
94 for (int b = 0; b < batches; ++b) { in FullyConnected()
138 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in FullyConnected() local
142 for (int b = 0; b < batches; ++b) { in FullyConnected()
192 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in ShuffledFullyConnected() local
201 if (batches == 1) { in ShuffledFullyConnected()
205 } else if (batches == 4) { in ShuffledFullyConnected()
225 if (batches == 1) { in ShuffledFullyConnected()
[all …]
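
Each of the fully_connected.h reference kernels above derives batches by collapsing every output dimension except the last (FlatSizeSkipDim) and then loops for (b = 0; b < batches; ++b) over those rows. A NumPy sketch of the same float path; shapes and names here are illustrative, not the TFLite API:

    import numpy as np

    def fully_connected_ref(input_data, weights, bias):
        """Float reference: output[b, o] = dot(input[b, :], weights[o, :]) + bias[o]."""
        # Collapse all leading dimensions into one "batches" dimension,
        # mirroring FlatSizeSkipDim(output_shape, output_dim_count - 1).
        accum_depth = weights.shape[1]
        batches = input_data.size // accum_depth
        flat_input = input_data.reshape(batches, accum_depth)

        output_depth = weights.shape[0]
        output = np.empty((batches, output_depth), dtype=np.float32)
        for b in range(batches):              # per-batch loop, as in the kernel
            for o in range(output_depth):
                output[b, o] = flat_input[b] @ weights[o] + bias[o]
        return output

    x = np.random.rand(2, 3, 8).astype(np.float32)   # 2 * 3 = 6 batches of depth 8
    w = np.random.rand(4, 8).astype(np.float32)      # output_depth = 4
    b = np.zeros(4, dtype=np.float32)
    print(fully_connected_ref(x, w, b).shape)        # (6, 4)
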
depthwiseconv_float.h
44 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in DepthwiseConv() local
56 for (int b = 0; b < batches; ++b) { in DepthwiseConv()
/external/tensorflow/tensorflow/core/kernels/
crop_and_resize_op_benchmark_test.cc
24 static Graph* BM_CropAndResize(int batches, int width, int height, int depth, in BM_CropAndResize() argument
27 Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth})); in BM_CropAndResize()
29 Tensor boxes(DT_FLOAT, TensorShape({batches, 4})); in BM_CropAndResize()
31 Tensor box_ind(DT_INT32, TensorShape({batches})); in BM_CropAndResize()
33 for (int i = 0; i < batches; ++i) { in BM_CropAndResize()
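
The crop-and-resize benchmark allocates an NHWC image of shape {batches, height, width, depth}, a {batches, 4} box tensor, and a {batches} box-index tensor. A short, hedged example feeding equivalently shaped inputs through the public op (all values invented):

    import tensorflow as tf

    batches, height, width, depth = 4, 48, 64, 3
    image = tf.random.uniform([batches, height, width, depth])   # NHWC input
    boxes = tf.tile([[0.0, 0.0, 1.0, 1.0]], [batches, 1])        # one box per batch entry
    box_indices = tf.range(batches, dtype=tf.int32)              # image each box crops from

    crops = tf.image.crop_and_resize(image, boxes, box_indices, crop_size=[24, 24])
    print(crops.shape)  # (4, 24, 24, 3)
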
adjust_contrast_op_benchmark_test.cc
24 static Graph* BM_AdjustContrast(int batches, int width, int height) { in BM_AdjustContrast() argument
26 Tensor in(DT_FLOAT, TensorShape({batches, width, height, 3})); in BM_AdjustContrast()
resize_op_benchmark_test.cc
24 static Graph* BM_Resize(const char* algorithm, int batches, int width, in BM_Resize() argument
27 Tensor in(DT_FLOAT, TensorShape({batches, width, height, 3})); in BM_Resize()
/external/mesa3d/src/mesa/main/
glthread.c
90 glthread->batches[i].ctx = ctx; in _mesa_glthread_init()
91 util_queue_fence_init(&glthread->batches[i].fence); in _mesa_glthread_init()
119 util_queue_fence_destroy(&glthread->batches[i].fence); in _mesa_glthread_destroy()
149 struct glthread_batch *next = &glthread->batches[glthread->next]; in _mesa_glthread_flush_batch()
193 struct glthread_batch *last = &glthread->batches[glthread->last]; in _mesa_glthread_finish()
194 struct glthread_batch *next = &glthread->batches[glthread->next]; in _mesa_glthread_finish()
/external/tensorflow/tensorflow/lite/kernels/
svdf_test.cc
131 BaseSVDFOpModel(int batches, int units, int input_size, int memory_size, in BaseSVDFOpModel() argument
135 : batches_(batches), in BaseSVDFOpModel()
146 TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}}, in BaseSVDFOpModel()
157 {batches, memory_size * num_filters} // activation_state tensor in BaseSVDFOpModel()
205 HybridSVDFOpModel(int batches, int units, int input_size, int memory_size, in HybridSVDFOpModel() argument
207 : BaseSVDFOpModel(batches, units, input_size, memory_size, rank, in HybridSVDFOpModel()
fully_connected_test.cc
137 TfLiteRegistration* registration, int units, int batches, in BaseFullyConnectedOpModel() argument
143 : batches_(batches), units_(units) {
276 HybridFullyConnectedOpModel(int units, int batches, const TensorData& input, in HybridFullyConnectedOpModel() argument
279 : batches_(batches), units_(units) {
550 int batches, FullyConnectedOptionsWeightsFormat weights_format) { in SimpleTestQuantizedInt16OutputCase() argument
563 registration, output_depth, batches, in SimpleTestQuantizedInt16OutputCase()
565 {TensorType_UINT8, {batches, input_depth}, kInputMin, kInputMax}, in SimpleTestQuantizedInt16OutputCase()
590 CHECK(batches == 1 || batches == 4); in SimpleTestQuantizedInt16OutputCase()
598 std::vector<float> input_data(input_depth * batches); in SimpleTestQuantizedInt16OutputCase()
617 std::vector<float> expected_output_data(output_depth * batches); in SimpleTestQuantizedInt16OutputCase()
[all …]
basic_rnn_test.cc
176 RNNOpModel(int batches, int units, int size, in RNNOpModel() argument
179 : batches_(batches), units_(units), input_size_(size) { in RNNOpModel()
236 HybridRNNOpModel(int batches, int units, int size, TensorType tensor_type) in HybridRNNOpModel() argument
237 : RNNOpModel(batches, units, size, tensor_type, tensor_type) { in HybridRNNOpModel()
log_softmax_test.cc
34 LogSoftmaxOpModel(int batches, int size) in LogSoftmaxOpModel() argument
35 : batches_(batches), input_size_(size) { in LogSoftmaxOpModel()
unidirectional_sequence_rnn_test.cc
175 int batches, int sequence_len, int units, int size, bool time_major, in UnidirectionalRNNOpModel() argument
178 : batches_(batches), in UnidirectionalRNNOpModel()
250 HybridUnidirectionalRNNOpModel(int batches, int sequence_len, int units, in HybridUnidirectionalRNNOpModel() argument
253 : UnidirectionalRNNOpModel(batches, sequence_len, units, size, time_major, in HybridUnidirectionalRNNOpModel()
softmax_test.cc
34 SoftmaxOpModel(int batches, int size, float beta) in SoftmaxOpModel() argument
35 : batches_(batches), input_size_(size), beta_(beta) { in SoftmaxOpModel()
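
The SoftmaxOpModel and LogSoftmaxOpModel tests above are parameterized by (batches, input_size), plus a beta scale for softmax. A NumPy sketch of what those ops compute per batch row (illustrative only, not the TFLite kernels):

    import numpy as np

    def softmax(batch_input, beta=1.0):
        """Row-wise softmax over a [batches, input_size] array, with beta scaling."""
        scaled = beta * batch_input
        scaled -= scaled.max(axis=1, keepdims=True)   # stabilize before exp
        e = np.exp(scaled)
        return e / e.sum(axis=1, keepdims=True)

    def log_softmax(batch_input):
        """Row-wise log-softmax over a [batches, input_size] array."""
        shifted = batch_input - batch_input.max(axis=1, keepdims=True)
        return shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))

    x = np.array([[1.0, 2.0, 3.0], [0.5, 0.5, 0.5]])  # batches = 2, input_size = 3
    print(softmax(x, beta=1.0))
    print(log_softmax(x))
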
/external/tensorflow/tensorflow/lite/kernels/internal/reference/integer_ops/
pooling.h
31 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in AveragePool() local
39 for (int batch = 0; batch < batches; ++batch) { in AveragePool()
92 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in MaxPool() local
100 for (int batch = 0; batch < batches; ++batch) { in MaxPool()
fully_connected.h
42 const int batches = output_shape.Dims(0); in FullyConnected() local
46 for (int b = 0; b < batches; ++b) { in FullyConnected()
conv.h
50 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in ConvPerChannel() local
64 for (int batch = 0; batch < batches; ++batch) { in ConvPerChannel()
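
The integer-ops AveragePool, MaxPool, and ConvPerChannel kernels above all take batches from MatchingDim(input_shape, 0, output_shape, 0) and iterate batch-major. A NumPy sketch of that loop structure for a VALID max pool; parameters are illustrative, not the TFLite quantized path:

    import numpy as np

    def max_pool_nhwc(x, filter_h, filter_w, stride_h, stride_w):
        """Batch-major NHWC max pool with VALID padding, mirroring the reference loop."""
        batches, in_h, in_w, depth = x.shape   # batches = MatchingDim(input, 0, output, 0)
        out_h = (in_h - filter_h) // stride_h + 1
        out_w = (in_w - filter_w) // stride_w + 1
        out = np.empty((batches, out_h, out_w, depth), dtype=x.dtype)
        for batch in range(batches):           # outermost loop over batches
            for oy in range(out_h):
                for ox in range(out_w):
                    y0, x0 = oy * stride_h, ox * stride_w
                    window = x[batch, y0:y0 + filter_h, x0:x0 + filter_w, :]
                    out[batch, oy, ox, :] = window.max(axis=(0, 1))
        return out

    x = np.random.randint(-128, 127, size=(2, 8, 8, 4), dtype=np.int8)
    print(max_pool_nhwc(x, 2, 2, 2, 2).shape)  # (2, 4, 4, 4)
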
/external/tensorflow/tensorflow/contrib/hvx/hexagon_controller/src_impl/
graph_functions_wrapper.c
213 const uint32_t nn_id, const uint32_t batches, const uint32_t height, in hexagon_controller_ExecuteGraph() argument
220 TFMLOGD("Input: %d, %d, %d, %d, %d, %d", batches, height, width, depth, in hexagon_controller_ExecuteGraph()
229 input.batches = batches; in hexagon_controller_ExecuteGraph()
243 *out_batches = output.batches; in hexagon_controller_ExecuteGraph()
/external/tensorflow/tensorflow/python/keras/engine/
training_generator_test.py
339 def _make_dataset(self, inputs, batches): argument
340 return dataset_ops.DatasetV2.from_tensors(inputs).repeat(batches)
342 def _make_iterator(self, inputs, batches): argument
344 self._make_dataset(inputs, batches))
346 def _make_generator(self, inputs, batches): argument
349 for _ in range(batches):
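
The training_generator_test.py helpers build a dataset that yields the same inputs a fixed number of times by repeating a single-element dataset. The equivalent call through the public tf.data API (eager style, toy inputs):

    import tensorflow as tf

    def make_dataset(inputs, batches):
        # One element holding `inputs`, repeated so the dataset yields `batches` batches.
        return tf.data.Dataset.from_tensors(inputs).repeat(batches)

    ds = make_dataset({"x": tf.zeros([4, 3]), "y": tf.ones([4, 1])}, batches=5)
    print(sum(1 for _ in ds))  # 5
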
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_BatchFunction.pbtxt
25 Number of scheduling threads for processing batches of work.
26 Determines the number of batches processed in parallel.
45 Maximum number of batches enqueued. Default: 10.
53 batches up to one of those sizes. The entries must increase monotonically, and
api_def_Batch.pbtxt
23 num_batch_threads: Number of scheduling threads for processing batches of work.
24 Determines the number of batches processed in parallel.
30 batches up to one of those sizes. The entries must increase monotonically, and
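
The two api_def excerpts describe the Batch/BatchFunction attributes: num_batch_threads sets how many batches are processed in parallel, max_enqueued_batches caps the queue (default 10), and allowed_batch_sizes must increase monotonically. A hedged sketch of how those attributes surface through the public batching decorator; this assumes tf.nondifferentiable_batch_function and the argument values are illustrative:

    import tensorflow as tf

    @tf.nondifferentiable_batch_function(
        num_batch_threads=2,             # batches processed in parallel
        max_batch_size=8,
        batch_timeout_micros=5000,       # flush an incomplete batch after 5 ms
        allowed_batch_sizes=[2, 4, 8],   # must increase monotonically
        max_enqueued_batches=10)         # maximum number of batches enqueued
    def batched_matmul(x):
        return tf.matmul(x, tf.ones([3, 3]))

    # A lone call waits at most batch_timeout_micros before its batch is processed.
    print(batched_matmul(tf.ones([1, 3])))
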
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils_test.cc
414 const int b_rows = 29, b_cols = 1, batches = 2; in TEST() local
427 int8_t b_int8_data[b_rows * b_cols * batches]; in TEST()
429 float scaling_factor_b[batches]; in TEST()
446 for (int i = 0; i < b_rows * b_cols * batches; ++i) { in TEST()
453 float c_float_data[a_rows * b_cols * batches]; in TEST()
454 for (int i = 0; i < a_rows * b_cols * batches; ++i) { in TEST()
464 scaling_factor_c, batches, c_float_data, in TEST()
471 for (int i = 0; i < a_rows * b_cols * batches; ++i) { in TEST()
