
Searched refs:batches (Results 1 – 25 of 171) sorted by relevance

/external/igt-gpu-tools/tests/i915/
gem_exec_gttfill.c
38 struct batch *batches = array; in xchg_batch() local
41 tmp = batches[i]; in xchg_batch()
42 batches[i] = batches[j]; in xchg_batch()
43 batches[j] = tmp; in xchg_batch()
49 struct batch *batches, unsigned int count) in submit() argument
90 obj.handle = batches[i].handle; in submit()
98 memcpy(batches[i].ptr + eb->batch_start_offset, in submit()
113 struct batch *batches; in fillgtt() local
155 batches = calloc(count, sizeof(*batches)); in fillgtt()
156 igt_assert(batches); in fillgtt()
[all …]
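The xchg_batch() hits above are the element-swap callback that igt's array-shuffling helper expects: the permutation routine only sees a void pointer and two indices, so the callback recovers the element type itself. A minimal self-contained sketch of the same pattern (the struct fields are trimmed stand-ins):

    #include <cassert>

    struct batch { unsigned handle; char *ptr; };

    /* Swap callback: cast the opaque array back to its element type,
     * then exchange elements i and j by value. */
    static void xchg_batch(void *array, unsigned i, unsigned j) {
        struct batch *batches = static_cast<struct batch *>(array);
        struct batch tmp = batches[i];
        batches[i] = batches[j];
        batches[j] = tmp;
    }

    int main() {
        struct batch b[2] = {{1, nullptr}, {2, nullptr}};
        xchg_batch(b, 0, 1);
        assert(b[0].handle == 2 && b[1].handle == 1);
    }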
gem_exec_whisper.c
173 struct drm_i915_gem_exec_object2 batches[1024]; in whisper() local
311 memset(batches, 0, sizeof(batches)); in whisper()
313 batches[n].handle = gem_create(fd, 4096); in whisper()
314 gem_write(fd, batches[n].handle, 0, &bbe, sizeof(bbe)); in whisper()
316 execbuf.buffers_ptr = to_user_pointer(batches); in whisper()
337 batches[n].relocs_ptr = to_user_pointer(&inter[n]); in whisper()
338 batches[n].relocation_count = 1; in whisper()
339 gem_write(fd, batches[n].handle, 0, batch, sizeof(batch)); in whisper()
341 old_offset = batches[n].offset; in whisper()
385 gem_write(fd, batches[1023].handle, loc, &pass, sizeof(pass)); in whisper()
[all …]
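A hedged sketch of the setup these whisper() hits outline: each of the 1024 exec objects owns a 4 KiB buffer whose only contents are an MI_BATCH_BUFFER_END, and the array is later handed to execbuf through a user pointer. The GEM helpers below are stubs standing in for the igt library calls, and the struct is trimmed to the fields the hits touch:

    #include <cstdint>
    #include <cstring>

    struct exec_object2 {              /* trimmed drm_i915_gem_exec_object2 */
        uint32_t handle;
        uint32_t relocation_count;
        uint64_t relocs_ptr;
        uint64_t offset;
    };

    /* Stubs standing in for igt's gem_create()/gem_write(). */
    static uint32_t gem_create(int, size_t) { static uint32_t next = 1; return next++; }
    static void gem_write(int, uint32_t, size_t, const void *, size_t) {}

    int main() {
        const int fd = -1;                      /* stand-in device fd */
        const uint32_t bbe = 0xA << 23;         /* MI_BATCH_BUFFER_END */
        static exec_object2 batches[1024];
        std::memset(batches, 0, sizeof(batches));
        for (unsigned n = 0; n < 1024; ++n) {
            batches[n].handle = gem_create(fd, 4096);
            gem_write(fd, batches[n].handle, 0, &bbe, sizeof(bbe));
        }
    }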
/external/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch_cache.c
137 struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0}; in bc_flush() local
145 fd_batch_reference_locked(&batches[n++], batch); in bc_flush()
153 if (batches[i] && (batches[i]->ctx == ctx) && in bc_flush()
154 (batches[i] != current_batch)) { in bc_flush()
155 fd_batch_add_dep(current_batch, batches[i]); in bc_flush()
164 fd_batch_flush(batches[i]); in bc_flush()
169 fd_batch_reference(&batches[i], NULL); in bc_flush()
267 cache->batches[batch->idx] = NULL; in fd_bc_invalidate_batch()
331 for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) { in fd_bc_alloc_batch()
332 batch = cache->batches[i]; in fd_bc_alloc_batch()
[all …]
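The bc_flush() hits sketch a common driver pattern: take local references to every cached batch while the cache lock is held, then flush and drop those references without the lock, so a flush can itself retire or allocate batches. A simplified analogue in standard C++ (fd_batch and the locking discipline are stand-ins for the freedreno types):

    #include <memory>
    #include <mutex>
    #include <vector>

    struct fd_batch { void flush() { /* submit commands */ } };

    std::mutex cache_lock;
    std::vector<std::shared_ptr<fd_batch>> cache_batches(8);

    void bc_flush_analogue() {
        std::vector<std::shared_ptr<fd_batch>> batches;  /* local refs */
        {
            std::lock_guard<std::mutex> lock(cache_lock);
            for (auto &b : cache_batches)
                if (b) batches.push_back(b);  /* reference while locked */
        }
        for (auto &b : batches)
            b->flush();                       /* flush outside the lock */
        batches.clear();                      /* drop the local refs */
    }

    int main() {
        cache_batches[0] = std::make_shared<fd_batch>();
        bc_flush_analogue();
    }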
/external/tensorflow/tensorflow/lite/kernels/
svdf_test.cc
131 BaseSVDFOpModel(int batches, int units, int input_size, int memory_size, in BaseSVDFOpModel() argument
136 : batches_(batches), in BaseSVDFOpModel()
147 TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}}); in BaseSVDFOpModel()
158 {batches, memory_size * num_filters} // activation_state tensor in BaseSVDFOpModel()
206 HybridSVDFOpModel(int batches, int units, int input_size, int memory_size, in HybridSVDFOpModel() argument
209 : BaseSVDFOpModel(batches, units, input_size, memory_size, rank, in HybridSVDFOpModel()
470 IntegerSVDFOpModel(int batches, int units, int input_size, int memory_size, in IntegerSVDFOpModel() argument
472 : batches_(batches), in IntegerSVDFOpModel()
478 input_ = AddInput({TensorType_INT8, {batches, input_size}, -1, 1}); in IntegerSVDFOpModel()
485 {TensorType_INT16, {batches, memory_size * num_filters}, -16, 16}); in IntegerSVDFOpModel()
[all …]
fully_connected_test.cc
138 TfLiteRegistration* registration, int units, int batches, in BaseFullyConnectedOpModel() argument
145 : batches_(batches), units_(units) {
296 HybridFullyConnectedOpModel(int units, int batches, const TensorData& input, in HybridFullyConnectedOpModel() argument
301 : batches_(batches), units_(units) {
701 int batches, FullyConnectedOptionsWeightsFormat weights_format) { in SimpleTestQuantizedInt16OutputCase() argument
714 registration, output_depth, batches, in SimpleTestQuantizedInt16OutputCase()
716 {TensorType_UINT8, {batches, input_depth}, kInputMin, kInputMax}, in SimpleTestQuantizedInt16OutputCase()
744 CHECK(batches == 1 || batches == 4); in SimpleTestQuantizedInt16OutputCase()
754 std::vector<float> input_data(input_depth * batches); in SimpleTestQuantizedInt16OutputCase()
773 std::vector<float> expected_output_data(output_depth * batches); in SimpleTestQuantizedInt16OutputCase()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/
record_input_test.py
102 batches = 2
114 batches=batches)
121 for _ in range(int(files * records_per_file / batches)):
123 self.assertTrue(len(op_list) is batches)
159 batches = 2
171 batches=batches)
178 for _ in range(int(files * records_per_file / batches)):
180 self.assertTrue(len(op_list) is batches)
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/sparse_ops/
fully_connected.h
44 const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); in FullyConnectedSparseWeight() local
56 for (int b = 0; b < batches; ++b) { in FullyConnectedSparseWeight()
66 for (int b = 0; b < batches; ++b) { in FullyConnectedSparseWeight()
91 const int batches = thread_end - thread_start; in FullyConnectedSparseWeight1x4Impl() local
101 weights_shape.Dims(1), input_data + thread_start * input_depth, batches, in FullyConnectedSparseWeight1x4Impl()
175 const int batches = in FullyConnectedSparseWeight1x4() local
177 const int thread_count = std::max(1, std::min(batches, max_threads)); in FullyConnectedSparseWeight1x4()
181 bias_shape, bias_data, output_shape, output_data, 0, batches, in FullyConnectedSparseWeight1x4()
191 int thread_end = thread_start + batches / thread_count; in FullyConnectedSparseWeight1x4()
192 if (i < batches % thread_count) thread_end++; in FullyConnectedSparseWeight1x4()
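Lines 181–192 of the 1x4 sparse kernel show how the batch range is divided among workers: every thread gets batches / thread_count rows, and the first batches % thread_count threads take one extra, so the remainder is spread evenly. A runnable sketch of just that arithmetic:

    #include <cstdio>

    int main() {
        const int batches = 10, thread_count = 4;
        int thread_start = 0;
        for (int i = 0; i < thread_count; ++i) {
            int thread_end = thread_start + batches / thread_count;
            if (i < batches % thread_count) thread_end++;  /* extra row */
            std::printf("thread %d: batches [%d, %d)\n",
                        i, thread_start, thread_end);
            thread_start = thread_end;
        }
    }

With 10 batches on 4 threads this prints the ranges [0,3), [3,6), [6,8), [8,10): no thread differs from another by more than one batch.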
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/
bucket_by_sequence_length_test.py
146 batches = []
149 batches.append(batch)
169 for batch in batches:
242 batches = []
245 batches.append(batch)
251 for batch in batches:
294 batches = []
297 batches.append(batch)
303 for batch in batches:
333 batches = []
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
fully_connected.h
41 const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); in FullyConnected() local
45 for (int b = 0; b < batches; ++b) { in FullyConnected()
86 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in FullyConnected() local
90 for (int b = 0; b < batches; ++b) { in FullyConnected()
133 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in FullyConnected() local
137 for (int b = 0; b < batches; ++b) { in FullyConnected()
186 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in ShuffledFullyConnected() local
195 if (batches == 1) { in ShuffledFullyConnected()
199 } else if (batches == 4) { in ShuffledFullyConnected()
219 if (batches == 1) { in ShuffledFullyConnected()
[all …]
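In all of these reference kernels, batches is FlatSizeSkipDim(output_shape, last_dim): the product of every output dimension except the channel one, so a [2, 3, output_depth] tensor runs 6 independent dot products. The ShuffledFullyConnected hits also appear to explain the CHECK(batches == 1 || batches == 4) in the test above: 1 and 4 look like the only batch counts that path implements. A minimal float version of the per-batch loop, with the shapes hand-rolled:

    #include <cstdio>
    #include <vector>

    int main() {
        const int batches = 2, input_depth = 3, output_depth = 2;
        std::vector<float> input   = {1, 2, 3,  4, 5, 6};  /* [batches][input_depth] */
        std::vector<float> weights = {1, 0, 0,  0, 1, 0};  /* [output_depth][input_depth] */
        std::vector<float> bias    = {0.5f, -0.5f};
        std::vector<float> output(batches * output_depth);
        for (int b = 0; b < batches; ++b)        /* one dot product per batch row */
            for (int out = 0; out < output_depth; ++out) {
                float acc = bias[out];
                for (int d = 0; d < input_depth; ++d)
                    acc += weights[out * input_depth + d] * input[b * input_depth + d];
                output[b * output_depth + out] = acc;
            }
        for (float v : output) std::printf("%g ", v);  /* 1.5 1.5 4.5 4.5 */
    }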
pooling.h
32 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in AveragePool() local
40 for (int batch = 0; batch < batches; ++batch) { in AveragePool()
88 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in AveragePool() local
96 for (int batch = 0; batch < batches; ++batch) { in AveragePool()
141 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in L2Pool() local
149 for (int batch = 0; batch < batches; ++batch) { in L2Pool()
195 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in MaxPool() local
203 for (int batch = 0; batch < batches; ++batch) { in MaxPool()
249 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in MaxPool() local
257 for (int batch = 0; batch < batches; ++batch) { in MaxPool()
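Every pooling kernel above follows the same NHWC iteration order: batch is the outermost loop (MatchingDim asserts input and output agree on dimension 0), then output y/x, then the filter window. A compact max-pool sketch over a single channel, with the filter and stride hard-coded:

    #include <algorithm>
    #include <cstdio>

    int main() {
        const int batches = 2, filter = 2, stride = 2;
        const int out_h = 2, out_w = 2;
        float input[2][4][4] = {};                       /* NHWC with C = 1 */
        input[1][3][3] = 9.f;                            /* one hot value */
        for (int batch = 0; batch < batches; ++batch)    /* outermost: batch */
            for (int oy = 0; oy < out_h; ++oy)
                for (int ox = 0; ox < out_w; ++ox) {
                    float m = input[batch][oy * stride][ox * stride];
                    for (int fy = 0; fy < filter; ++fy)  /* filter window */
                        for (int fx = 0; fx < filter; ++fx)
                            m = std::max(m, input[batch][oy * stride + fy]
                                                        [ox * stride + fx]);
                    std::printf("b%d y%d x%d: %g\n", batch, oy, ox, m);
                }
    }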
transpose_conv.h
41 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in TransposeConv() local
67 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
101 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
131 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in TransposeConv() local
158 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
192 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
conv.h
45 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in Conv() local
57 for (int batch = 0; batch < batches; ++batch) { in Conv()
128 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in Conv() local
140 for (int batch = 0; batch < batches; ++batch) { in Conv()
208 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in HybridConvPerChannel() local
220 for (int batch = 0; batch < batches; ++batch) { in HybridConvPerChannel()
/external/tensorflow/tensorflow/core/kernels/image/
crop_and_resize_op_benchmark_test.cc
24 static Graph* CropAndResize(int batches, int width, int height, int depth, in CropAndResize() argument
27 Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth})); in CropAndResize()
29 Tensor boxes(DT_FLOAT, TensorShape({batches, 4})); in CropAndResize()
31 Tensor box_ind(DT_INT32, TensorShape({batches})); in CropAndResize()
33 for (int i = 0; i < batches; ++i) { in CropAndResize()
non_max_suppression_op_benchmark_test.cc
24 static Graph* CombinedNonMaxSuppression(int batches, int box_num, int class_num, in CombinedNonMaxSuppression() argument
27 Tensor boxes(DT_FLOAT, TensorShape({batches, box_num, q, 4})); in CombinedNonMaxSuppression()
29 Tensor scores(DT_FLOAT, TensorShape({batches, box_num, class_num})); in CombinedNonMaxSuppression()
/external/mesa3d/src/gallium/drivers/iris/
iris_context.c
84 ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]); in iris_lost_context_state()
85 assert(&ice->batches[IRIS_BATCH_RENDER] == batch); in iris_lost_context_state()
89 ice = container_of(batch, ice, batches[IRIS_BATCH_COMPUTE]); in iris_lost_context_state()
90 assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch); in iris_lost_context_state()
122 iris_batch_check_for_reset(&ice->batches[i]); in iris_get_device_reset_status()
252 iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]); in iris_destroy_context()
253 iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]); in iris_destroy_context()
359 screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]); in iris_create_context()
360 screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]); in iris_create_context()
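The iris_lost_context_state() hits recover the owning context from a bare batch pointer: since batches is an array embedded inside iris_context, subtracting the member offset from the batch address yields the context, and the assert double-checks which slot it came from. A simplified, self-contained version (struct layout and the container_of macro are stand-ins for mesa's):

    #include <cassert>
    #include <cstddef>

    enum { IRIS_BATCH_RENDER, IRIS_BATCH_COMPUTE, IRIS_BATCH_COUNT };
    struct iris_batch { int num; };
    struct iris_context { int flags; iris_batch batches[IRIS_BATCH_COUNT]; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main() {
        iris_context ctx = {};
        iris_batch *batch = &ctx.batches[IRIS_BATCH_RENDER];
        iris_context *ice = container_of(batch, iris_context, batches);
        assert(&ice->batches[IRIS_BATCH_RENDER] == batch);  /* mirrors line 85 */
        assert(ice == &ctx);
    }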
iris_perf.c
35 iris_emit_end_of_pipe_sync(&ice->batches[IRIS_BATCH_RENDER], in iris_perf_emit_stall_at_pixel_scoreboard()
47 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_perf_emit_mi_report_perf_count()
55 _iris_batch_flush(&ice->batches[IRIS_BATCH_RENDER], __FILE__, __LINE__); in iris_perf_batchbuffer_flush()
64 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_perf_store_register_mem()
iris_pipe_control.c
306 struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_texture_barrier()
307 struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE]; in iris_texture_barrier()
355 if (ice->batches[i].contains_draw) { in iris_memory_barrier()
356 iris_batch_maybe_flush(&ice->batches[i], 24); in iris_memory_barrier()
357 iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier", in iris_memory_barrier()
/external/mesa3d/src/mesa/main/
glthread.c
105 glthread->batches[i].ctx = ctx; in _mesa_glthread_init()
106 util_queue_fence_init(&glthread->batches[i].fence); in _mesa_glthread_init()
108 glthread->next_batch = &glthread->batches[glthread->next]; in _mesa_glthread_init()
153 util_queue_fence_destroy(&glthread->batches[i].fence); in _mesa_glthread_destroy()
235 glthread->next_batch = &glthread->batches[glthread->next]; in _mesa_glthread_flush_batch()
259 struct glthread_batch *last = &glthread->batches[glthread->last]; in _mesa_glthread_finish()
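The glthread.c hits describe a small ring of command batches, each with its own fence: init sets up every batch's fence (lines 105–106), flush advances next/next_batch to the following slot (line 235), and finish waits on the last submitted batch (line 259). A simplified ring cursor, with the fence machinery reduced to a flag (the real code uses util_queue_fence, and the ring size here is assumed):

    #include <array>

    struct glthread_batch { bool fence_signalled = true; };

    struct glthread_state {
        std::array<glthread_batch, 4> batches;  /* ring size assumed */
        unsigned next = 0, last = 0;
        glthread_batch *next_batch = &batches[0];

        void flush_batch() {                    /* hand batch to the worker */
            last = next;
            next = (next + 1) % batches.size();
            next_batch = &batches[next];        /* mirrors line 235 */
        }
        bool finish() const {                   /* wait analogue of line 259 */
            return batches[last].fence_signalled;
        }
    };

    int main() {
        glthread_state g;
        g.flush_batch();                        /* move on to batches[1] */
        return g.finish() ? 0 : 1;
    }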
/external/tensorflow/tensorflow/lite/kernels/internal/reference/integer_ops/
pooling.h
32 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in AveragePool() local
40 for (int batch = 0; batch < batches; ++batch) { in AveragePool()
93 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in MaxPool() local
101 for (int batch = 0; batch < batches; ++batch) { in MaxPool()
148 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in AveragePool() local
156 for (int batch = 0; batch < batches; ++batch) { in AveragePool()
209 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in MaxPool() local
217 for (int batch = 0; batch < batches; ++batch) { in MaxPool()
fully_connected.h
41 const int batches = output_shape.Dims(0); in FullyConnected() local
45 for (int b = 0; b < batches; ++b) { in FullyConnected()
81 const int batches = output_shape.Dims(0); in FullyConnected() local
85 for (int b = 0; b < batches; ++b) { in FullyConnected()
transpose_conv.h
42 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in TransposeConv() local
66 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
100 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
141 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in TransposeConv() local
163 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
197 for (int batch = 0; batch < batches; ++batch) { in TransposeConv()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
im2col_utils.h
138 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in DilatedIm2col() local
150 const RuntimeShape row_shape({1, batches, output_height, output_width}); in DilatedIm2col()
158 for (int batch = 0; batch < batches; ++batch) { in DilatedIm2col()
224 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in Im2col() local
234 for (int b = 0; b < batches; ++b) { in Im2col()
260 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in Im2col() local
261 TFLITE_DCHECK_EQ(batches, input_offsets_size); in Im2col()
271 for (int b = 0; b < batches; ++b) { in Im2col()
depthwiseconv_multithread.h
93 inline bool MultithreadAlongBatches(int thread_count, int batches) { in MultithreadAlongBatches() argument
97 if (batches < thread_count) { in MultithreadAlongBatches()
107 if (batches >= 2 * thread_count) { in MultithreadAlongBatches()
115 return ((batches % thread_count) == 0); in MultithreadAlongBatches()
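The MultithreadAlongBatches() hits fit together into one small heuristic: refuse to split depthwise-conv work along the batch dimension unless every thread can be kept busy. Reassembled from the three hits shown (the intervening source lines are assumed to be commentary):

    /* Decide whether depthwise conv should be parallelized across the
     * batch dimension rather than across output rows. */
    inline bool MultithreadAlongBatches(int thread_count, int batches) {
        if (batches < thread_count)
            return false;              /* line 97: too few batches to share */
        if (batches >= 2 * thread_count)
            return true;               /* line 107: >= 2 batches per thread */
        return (batches % thread_count) == 0;  /* line 115: only even splits */
    }

    int main() { return MultithreadAlongBatches(4, 10) ? 0 : 1; }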
/external/tensorflow/tensorflow/lite/kernels/perception/
max_unpooling_2d.cc
37 const int batches = MatchingDim(input_shape, 0, output_shape, 0); in MaxUnpooling() local
41 for (int batch = 0; batch < batches; ++batch) { in MaxUnpooling()
85 int batches = input->dims->data[0]; in Prepare() local
100 output_size->data[0] = batches; in Prepare()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h
54 const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); in FullyConnected() local
71 rhs_params.cols = batches; in FullyConnected()
76 dst_params.cols = batches; in FullyConnected()
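Unlike the reference kernels, the optimized integer kernel reframes the whole batch loop as a single GEMM: the weights form the LHS matrix, the activations become an input_depth x batches RHS, and batches therefore shows up as the column count of both the RHS and the destination (lines 71 and 76). A sketch of that shape bookkeeping (the parameter structs are simplified stand-ins for the cpu_backend_gemm types):

    #include <cstdio>

    struct MatrixParams { int rows; int cols; };

    /* Map a fully connected layer onto GEMM shapes. */
    inline void MapFullyConnectedToGemm(int batches, int input_depth,
                                        int output_depth,
                                        MatrixParams &lhs,   /* weights */
                                        MatrixParams &rhs,   /* input   */
                                        MatrixParams &dst) { /* output  */
        lhs.rows = output_depth;  lhs.cols = input_depth;
        rhs.rows = input_depth;   rhs.cols = batches;  /* line 71 */
        dst.rows = output_depth;  dst.cols = batches;  /* line 76 */
    }

    int main() {
        MatrixParams lhs, rhs, dst;
        MapFullyConnectedToGemm(/*batches=*/4, /*input_depth=*/16,
                                /*output_depth=*/8, lhs, rhs, dst);
        std::printf("dst: %d x %d\n", dst.rows, dst.cols);  /* 8 x 4 */
    }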
