/external/igt-gpu-tools/tests/i915/
gem_exec_gttfill.c
     38  struct batch *batches = array;                     in xchg_batch() local
     41  tmp = batches[i];                                  in xchg_batch()
     42  batches[i] = batches[j];                           in xchg_batch()
     43  batches[j] = tmp;                                  in xchg_batch()
     49  struct batch *batches, unsigned int count)         in submit() argument
     90  obj.handle = batches[i].handle;                    in submit()
     98  memcpy(batches[i].ptr + eb->batch_start_offset,    in submit()
    113  struct batch *batches;                             in fillgtt() local
    155  batches = calloc(count, sizeof(*batches));         in fillgtt()
    156  igt_assert(batches);                               in fillgtt()
    [all …]

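Lines 38-43 above are the element-swap callback that igt_permute_array() uses to shuffle the batch array before submission. A minimal self-contained C sketch of the pattern (the struct layout is an illustrative stand-in, not the test's real struct batch):

    /* Swap callback: igt_permute_array() hands back the array plus two
     * indices i and j; the callback just exchanges the two elements. */
    struct batch { unsigned int handle; void *ptr; };  /* illustrative */

    static void xchg_batch(void *array, unsigned int i, unsigned int j)
    {
            struct batch *batches = array;
            struct batch tmp;

            tmp = batches[i];
            batches[i] = batches[j];
            batches[j] = tmp;
    }
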
gem_exec_whisper.c
    173  struct drm_i915_gem_exec_object2 batches[1024];             in whisper() local
    311  memset(batches, 0, sizeof(batches));                        in whisper()
    313  batches[n].handle = gem_create(fd, 4096);                   in whisper()
    314  gem_write(fd, batches[n].handle, 0, &bbe, sizeof(bbe));     in whisper()
    316  execbuf.buffers_ptr = to_user_pointer(batches);             in whisper()
    337  batches[n].relocs_ptr = to_user_pointer(&inter[n]);         in whisper()
    338  batches[n].relocation_count = 1;                            in whisper()
    339  gem_write(fd, batches[n].handle, 0, batch, sizeof(batch));  in whisper()
    341  old_offset = batches[n].offset;                             in whisper()
    385  gem_write(fd, batches[1023].handle, loc, &pass, sizeof(pass));  in whisper()
    [all …]

/external/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch_cache.c
    137  struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};  in bc_flush() local
    145  fd_batch_reference_locked(&batches[n++], batch);             in bc_flush()
    153  if (batches[i] && (batches[i]->ctx == ctx) &&                in bc_flush()
    154      (batches[i] != current_batch)) {                         in bc_flush()
    155  fd_batch_add_dep(current_batch, batches[i]);                 in bc_flush()
    164  fd_batch_flush(batches[i]);                                  in bc_flush()
    169  fd_batch_reference(&batches[i], NULL);                       in bc_flush()
    267  cache->batches[batch->idx] = NULL;                           in fd_bc_invalidate_batch()
    331  for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {  in fd_bc_alloc_batch()
    332  batch = cache->batches[i];                                   in fd_bc_alloc_batch()
    [all …]

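bc_flush() above follows a take-references-under-the-lock shape: pin every cached batch first (line 145), then link them as dependencies of the current batch or flush them, and finally drop the temporary references (line 169). A hedged, generic C re-creation of that shape under stand-in types; only the overall pattern mirrors the freedreno code, none of these names are its API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct cached_batch { int refcount; bool flushed; };

    struct batch_cache {
            pthread_mutex_t lock;
            struct cached_batch *slots[32];
    };

    /* Reference helper: takes a ref on src, drops the ref held in *dst. */
    static void batch_ref(struct cached_batch **dst, struct cached_batch *src)
    {
            if (src)
                    src->refcount++;
            if (*dst && --(*dst)->refcount == 0)
                    ;  /* real code would free *dst here */
            *dst = src;
    }

    static void cache_flush(struct batch_cache *c)
    {
            struct cached_batch *batches[32] = {0};
            unsigned int n = 0;

            /* 1. Pin every live batch under the lock so none can be
             *    destroyed while we work on them. */
            pthread_mutex_lock(&c->lock);
            for (unsigned int i = 0; i < 32; i++)
                    if (c->slots[i])
                            batch_ref(&batches[n++], c->slots[i]);
            pthread_mutex_unlock(&c->lock);

            /* 2. Flush outside the lock, then drop the temporary refs. */
            for (unsigned int i = 0; i < n; i++) {
                    batches[i]->flushed = true;  /* stands in for fd_batch_flush() */
                    batch_ref(&batches[i], NULL);
            }
    }
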
freedreno_batch_cache.h
     51  struct fd_batch *batches[32];  member
     61  for (uint32_t _m = (mask); _m && ((batch) = (cache)->batches[u_bit_scan(&_m)]); _m &= (mask))

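The line-61 iterator macro visits only the cache slots whose bit is set in a mask: u_bit_scan() returns the index of the lowest set bit and clears it, so the loop ends once the mask is exhausted. A self-contained C sketch of the same iteration, with u_bit_scan() re-implemented for illustration (mesa's real helper lives in util/u_math.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Return the index of the lowest set bit of *mask and clear it. */
    static inline int bit_scan(uint32_t *mask)
    {
            int i = __builtin_ctz(*mask);  /* index of lowest set bit */
            *mask &= *mask - 1;            /* clear that bit */
            return i;
    }

    int main(void)
    {
            const char *batches[32] = { [0] = "a", [3] = "b", [5] = "c" };
            uint32_t mask = (1u << 0) | (1u << 3) | (1u << 5);

            /* Visits slots 0, 3 and 5 only -- the same walk the macro
             * performs over cache->batches[]. */
            for (uint32_t m = mask; m;) {
                    int idx = bit_scan(&m);
                    printf("slot %d: %s\n", idx, batches[idx]);
            }
            return 0;
    }
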
/external/tensorflow/tensorflow/lite/kernels/
svdf_test.cc
    131  BaseSVDFOpModel(int batches, int units, int input_size, int memory_size,    in BaseSVDFOpModel() argument
    135  : batches_(batches),                                                        in BaseSVDFOpModel()
    146  TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}},       in BaseSVDFOpModel()
    157  {batches, memory_size * num_filters}  // activation_state tensor            in BaseSVDFOpModel()
    205  HybridSVDFOpModel(int batches, int units, int input_size, int memory_size,  in HybridSVDFOpModel() argument
    207  : BaseSVDFOpModel(batches, units, input_size, memory_size, rank,            in HybridSVDFOpModel()
    461  IntegerSVDFOpModel(int batches, int units, int input_size, int memory_size, in IntegerSVDFOpModel() argument
    463  : batches_(batches),                                                        in IntegerSVDFOpModel()
    469  input_ = AddInput({TensorType_INT8, {batches, input_size}, -1, 1});         in IntegerSVDFOpModel()
    476  {TensorType_INT16, {batches, memory_size * num_filters}, -16, 16},          in IntegerSVDFOpModel()
    [all …]

fully_connected_test.cc
    128  TfLiteRegistration* registration, int units, int batches,                     in BaseFullyConnectedOpModel() argument
    135  : batches_(batches), units_(units) {
    286  HybridFullyConnectedOpModel(int units, int batches, const TensorData& input,  in HybridFullyConnectedOpModel() argument
    289  : batches_(batches), units_(units) {
    685  int batches, FullyConnectedOptionsWeightsFormat weights_format) {             in SimpleTestQuantizedInt16OutputCase() argument
    698  registration, output_depth, batches,                                          in SimpleTestQuantizedInt16OutputCase()
    700  {TensorType_UINT8, {batches, input_depth}, kInputMin, kInputMax},             in SimpleTestQuantizedInt16OutputCase()
    726  CHECK(batches == 1 || batches == 4);                                          in SimpleTestQuantizedInt16OutputCase()
    734  std::vector<float> input_data(input_depth * batches);                         in SimpleTestQuantizedInt16OutputCase()
    753  std::vector<float> expected_output_data(output_depth * batches);              in SimpleTestQuantizedInt16OutputCase()
    [all …]

basic_rnn_test.cc
    176  RNNOpModel(int batches, int units, int size,                                 in RNNOpModel() argument
    179  : batches_(batches), units_(units), input_size_(size) {                      in RNNOpModel()
    236  HybridRNNOpModel(int batches, int units, int size, TensorType tensor_type)   in HybridRNNOpModel() argument
    237  : RNNOpModel(batches, units, size, tensor_type, tensor_type) {               in HybridRNNOpModel()

/external/tensorflow/tensorflow/python/kernel_tests/
record_input_test.py
    102  batches = 2
    114  batches=batches)
    121  for _ in range(int(files * records_per_file / batches)):
    123  self.assertTrue(len(op_list) is batches)
    159  batches = 2
    171  batches=batches)
    178  for _ in range(int(files * records_per_file / batches)):
    180  self.assertTrue(len(op_list) is batches)

/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/
bucket_by_sequence_length_test.py
    145  batches = []
    148  batches.append(batch)
    168  for batch in batches:
    240  batches = []
    243  batches.append(batch)
    249  for batch in batches:
    291  batches = []
    294  batches.append(batch)
    300  for batch in batches:
    329  batches = []
    [all …]

/external/tensorflow/tensorflow/lite/kernels/internal/reference/
fully_connected.h
     43  const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1);  in FullyConnected() local
     47  for (int b = 0; b < batches; ++b) {                                        in FullyConnected()
     88  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);   in FullyConnected() local
     92  for (int b = 0; b < batches; ++b) {                                        in FullyConnected()
    135  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);   in FullyConnected() local
    139  for (int b = 0; b < batches; ++b) {                                        in FullyConnected()
    187  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);   in ShuffledFullyConnected() local
    196  if (batches == 1) {                                                        in ShuffledFullyConnected()
    200  } else if (batches == 4) {                                                 in ShuffledFullyConnected()
    220  if (batches == 1) {                                                        in ShuffledFullyConnected()
    [all …]

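In every variant above the batch count comes from FlatSizeSkipDim(), i.e. the product of all output dimensions except the innermost (channel) one, and the kernel itself is a plain loop nest over batches. A hedged C sketch of the float path; the real kernels are C++ in tflite::reference_ops and take shape/params structs, simplified here to raw pointers:

    /* Float fully-connected reference kernel, simplified: each output
     * channel of each batch row is a dot product over accum_depth inputs
     * plus bias, clamped to the fused-activation range. */
    void fully_connected_float_sketch(const float *input, const float *weights,
                                      const float *bias, float *output,
                                      int batches, int output_depth,
                                      int accum_depth,
                                      float act_min, float act_max)
    {
            for (int b = 0; b < batches; ++b) {
                    for (int out_c = 0; out_c < output_depth; ++out_c) {
                            float total = 0.0f;
                            for (int d = 0; d < accum_depth; ++d)
                                    total += input[b * accum_depth + d] *
                                             weights[out_c * accum_depth + d];
                            if (bias)
                                    total += bias[out_c];
                            /* fused activation applied as a clamp */
                            if (total < act_min) total = act_min;
                            if (total > act_max) total = act_max;
                            output[b * output_depth + out_c] = total;
                    }
            }
    }
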
pooling.h
     32  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in AveragePool() local
     40  for (int batch = 0; batch < batches; ++batch) {                    in AveragePool()
     87  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in AveragePool() local
     95  for (int batch = 0; batch < batches; ++batch) {                    in AveragePool()
    140  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in L2Pool() local
    148  for (int batch = 0; batch < batches; ++batch) {                    in L2Pool()
    194  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in MaxPool() local
    202  for (int batch = 0; batch < batches; ++batch) {                    in MaxPool()
    248  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in MaxPool() local
    256  for (int batch = 0; batch < batches; ++batch) {                    in MaxPool()

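All five pooling kernels share the same NHWC loop nest with the batch dimension outermost; MatchingDim() merely asserts that input and output agree on dim 0 and returns it. A hedged C sketch of the average-pool body, keeping the reference semantics of clipping the window to the input and dividing by the number of elements actually visited:

    /* NHWC average pooling, simplified from the reference kernel. */
    void average_pool_float_sketch(const float *input, float *output,
                                   int batches, int in_h, int in_w,
                                   int channels, int out_h, int out_w,
                                   int filter_h, int filter_w,
                                   int stride_h, int stride_w,
                                   int pad_h, int pad_w)
    {
            for (int batch = 0; batch < batches; ++batch)
            for (int oy = 0; oy < out_h; ++oy)
            for (int ox = 0; ox < out_w; ++ox)
            for (int c = 0; c < channels; ++c) {
                    /* Clip the filter window to the valid input region. */
                    const int y0 = oy * stride_h - pad_h;
                    const int x0 = ox * stride_w - pad_w;
                    const int fy_start = y0 < 0 ? -y0 : 0;
                    const int fx_start = x0 < 0 ? -x0 : 0;
                    const int fy_end = y0 + filter_h > in_h ? in_h - y0 : filter_h;
                    const int fx_end = x0 + filter_w > in_w ? in_w - x0 : filter_w;

                    float total = 0.0f;
                    int count = 0;
                    for (int fy = fy_start; fy < fy_end; ++fy)
                    for (int fx = fx_start; fx < fx_end; ++fx) {
                            const int iy = y0 + fy, ix = x0 + fx;
                            total += input[((batch * in_h + iy) * in_w + ix)
                                           * channels + c];
                            ++count;
                    }
                    output[((batch * out_h + oy) * out_w + ox) * channels + c] =
                            count ? total / count : 0.0f;
            }
    }
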
conv.h
     48  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in Conv() local
     60  for (int batch = 0; batch < batches; ++batch) {                    in Conv()
    128  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in Conv() local
    140  for (int batch = 0; batch < batches; ++batch) {                    in Conv()
    205  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in HybridConvPerChannel() local
    217  for (int batch = 0; batch < batches; ++batch) {                    in HybridConvPerChannel()

/external/tensorflow/tensorflow/core/kernels/
crop_and_resize_op_benchmark_test.cc
     24  static Graph* BM_CropAndResize(int batches, int width, int height, int depth,  in BM_CropAndResize() argument
     27  Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth}));             in BM_CropAndResize()
     29  Tensor boxes(DT_FLOAT, TensorShape({batches, 4}));                             in BM_CropAndResize()
     31  Tensor box_ind(DT_INT32, TensorShape({batches}));                              in BM_CropAndResize()
     33  for (int i = 0; i < batches; ++i) {                                            in BM_CropAndResize()

/external/mesa3d/src/gallium/drivers/iris/
iris_context.c
     84  ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);            in iris_lost_context_state()
     85  assert(&ice->batches[IRIS_BATCH_RENDER] == batch);                     in iris_lost_context_state()
     89  ice = container_of(batch, ice, batches[IRIS_BATCH_COMPUTE]);           in iris_lost_context_state()
     90  assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch);                    in iris_lost_context_state()
    122  iris_batch_check_for_reset(&ice->batches[i]);                          in iris_get_device_reset_status()
    252  iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]);                     in iris_destroy_context()
    253  iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]);                    in iris_destroy_context()
    359  screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);    in iris_create_context()
    360  screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);  in iris_create_context()

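Lines 84 and 89 recover the owning iris_context from a bare batch pointer with container_of() and then assert the guess against the expected array slot. Mesa's container_of() takes an example pointer rather than a type name, but the arithmetic is the classic offsetof() trick; a self-contained C sketch of the kernel-style form:

    #include <assert.h>
    #include <stddef.h>

    /* Step back from a member pointer by the member's offset to reach
     * the enclosing struct. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct context {            /* stand-in for struct iris_context */
            int other_state;
            int batches[2];     /* stand-in for the iris_batch array */
    };

    int main(void)
    {
            struct context ctx;
            int *batch = &ctx.batches[1];

            /* Recover the context from the batch pointer, then verify,
             * mirroring the asserts at lines 85 and 90 above. */
            struct context *ice = container_of(batch, struct context, batches[1]);
            assert(&ice->batches[1] == batch);
            return 0;
    }
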
iris_perf.c
     35  iris_emit_end_of_pipe_sync(&ice->batches[IRIS_BATCH_RENDER],              in iris_perf_emit_stall_at_pixel_scoreboard()
     47  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];              in iris_perf_emit_mi_report_perf_count()
     55  _iris_batch_flush(&ice->batches[IRIS_BATCH_RENDER], __FILE__, __LINE__);  in iris_perf_batchbuffer_flush()
     64  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];              in iris_perf_store_register_mem()

iris_pipe_control.c
    306  struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];     in iris_texture_barrier()
    307  struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];   in iris_texture_barrier()
    355  if (ice->batches[i].contains_draw) {                                    in iris_memory_barrier()
    356  iris_batch_maybe_flush(&ice->batches[i], 24);                           in iris_memory_barrier()
    357  iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",   in iris_memory_barrier()

/external/mesa3d/src/mesa/main/
glthread.c
    105  glthread->batches[i].ctx = ctx;                                     in _mesa_glthread_init()
    106  util_queue_fence_init(&glthread->batches[i].fence);                 in _mesa_glthread_init()
    108  glthread->next_batch = &glthread->batches[glthread->next];          in _mesa_glthread_init()
    153  util_queue_fence_destroy(&glthread->batches[i].fence);              in _mesa_glthread_destroy()
    235  glthread->next_batch = &glthread->batches[glthread->next];          in _mesa_glthread_flush_batch()
    259  struct glthread_batch *last = &glthread->batches[glthread->last];   in _mesa_glthread_finish()

/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
im2col_utils.h
    135  const int batches = MatchingDim(input_shape, 0, output_shape, 0);         in DilatedIm2col() local
    147  const RuntimeShape row_shape({1, batches, output_height, output_width});  in DilatedIm2col()
    155  for (int batch = 0; batch < batches; ++batch) {                           in DilatedIm2col()
    209  const int batches = MatchingDim(input_shape, 0, output_shape, 0);         in Im2col() local
    219  for (int b = 0; b < batches; ++b) {                                       in Im2col()
    245  const int batches = MatchingDim(input_shape, 0, output_shape, 0);         in Im2col() local
    246  TFLITE_DCHECK_EQ(batches, input_offsets_size);                            in Im2col()
    256  for (int b = 0; b < batches; ++b) {                                       in Im2col()

depthwiseconv_multithread.h
     93  inline bool MultithreadAlongBatches(int thread_count, int batches) {  in MultithreadAlongBatches() argument
     97  if (batches < thread_count) {                                         in MultithreadAlongBatches()
    107  if (batches >= 2 * thread_count) {                                    in MultithreadAlongBatches()
    115  return ((batches % thread_count) == 0);                               in MultithreadAlongBatches()

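The four quoted lines are the entire decision procedure for threading depthwise convolution along the batch dimension. Assembled into one readable function; the logic is exactly the excerpt's, only the comments are added:

    #include <stdbool.h>

    /* Should depthwise-conv work be split across threads by batch? */
    static inline bool MultithreadAlongBatches(int thread_count, int batches)
    {
            /* Fewer batches than threads: some threads would sit idle. */
            if (batches < thread_count)
                    return false;
            /* At least two batches per thread: splitting is worthwhile. */
            if (batches >= 2 * thread_count)
                    return true;
            /* In between, accept only an even division so no thread gets
             * a disproportionately large share. */
            return (batches % thread_count) == 0;
    }
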
/external/tensorflow/tensorflow/lite/kernels/internal/reference/integer_ops/
fully_connected.h
     41  const int batches = output_shape.Dims(0);   in FullyConnected() local
     45  for (int b = 0; b < batches; ++b) {         in FullyConnected()
     83  const int batches = output_shape.Dims(0);   in FullyConnected() local
     87  for (int b = 0; b < batches; ++b) {         in FullyConnected()

pooling.h
     31  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in AveragePool() local
     39  for (int batch = 0; batch < batches; ++batch) {                    in AveragePool()
     92  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in MaxPool() local
    100  for (int batch = 0; batch < batches; ++batch) {                    in MaxPool()

transpose_conv.h
     41  const int batches = MatchingDim(input_shape, 0, output_shape, 0);  in TransposeConv() local
     62  for (int batch = 0; batch < batches; ++batch) {                    in TransposeConv()
     96  for (int batch = 0; batch < batches; ++batch) {                    in TransposeConv()

/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h
     52  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);  in FullyConnected() local
     69  rhs_params.cols = batches;                                                in FullyConnected()
     74  dst_params.cols = batches;                                                in FullyConnected()

/external/mesa3d/src/gallium/drivers/zink/
zink_context.h
     87  struct zink_batch batches[4];                          member
    153  assert(ctx->curr_batch < ARRAY_SIZE(ctx->batches));    in zink_curr_batch()
    154  return ctx->batches + ctx->curr_batch;                 in zink_curr_batch()

/external/igt-gpu-tools/benchmarks/wsim/
README
     25  't' - Throttle every n batches.
     55  2-4. Now three batches are sent to RCS with durations of 0.5-1.5ms (random
     83  duration field. Such batches must be ended by the terminate command ('T')
    110  VCS1 and VCS2 batches will have a sync fence dependency on the RCS batch.
    123  VCS1 and VCS2 batches have an input sync fence dependency on the standalone fence
    126  which allows the two VCS batches to be executed. Finally we wait until both
    127  VCS batches have completed before starting the (optional) next iteration.
    143  Here VCS1 and VCS2 batches will only be submitted for execution once the RCS
    170  Context 1 is marked as non-preemptable batches and a batch is sent against 1.
    171  The same context is then marked to have batches which can be preempted every

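For context, a wsim workload descriptor is a plain-text list of steps. From memory, the batch-step fields are context.engine.duration-in-us.dependency.wait, with a negative dependency indexing an earlier step relative to the current one; treat the exact syntax as illustrative and defer to the full README. A sketch matching the RCS/VCS scenario quoted above:

    1.RCS.500-1500.0.0
    1.VCS1.3000.-1.0
    1.VCS2.3000.-2.0

The first step queues a batch on RCS with a random 0.5-1.5ms duration; the next two queue batches on VCS1 and VCS2 whose dependency fields point one and two steps back at the RCS batch, giving them the dependency described at line 110.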