/external/tensorflow/tensorflow/compiler/xla/service/gpu/
D | buffer_allocations.cc
     59  BufferAllocation::Index buffer_index) const {  in GetDeviceAddress()
     60  CHECK_GE(buffer_index, 0);  in GetDeviceAddress()
     61  CHECK_LT(buffer_index, buffers_.size());  in GetDeviceAddress()
     62  return buffers_[buffer_index];  in GetDeviceAddress()
     66  BufferAllocation::Index buffer_index) {  in GetMutableDeviceAddress() argument
     67  CHECK_GE(buffer_index, 0);  in GetMutableDeviceAddress()
     68  CHECK_LT(buffer_index, buffers_.size());  in GetMutableDeviceAddress()
     69  return buffers_[buffer_index];  in GetMutableDeviceAddress()
|
/external/cronet/stable/base/containers/
D | ring_buffer.h
     47  const size_t buffer_index = BufferIndex(n);  in ReadBuffer() local
     48  CHECK(IsFilledIndexByBufferIndex(buffer_index));  in ReadBuffer()
     49  return buffer_[buffer_index];  in ReadBuffer()
     53  const size_t buffer_index = BufferIndex(n);  in MutableReadBuffer() local
     54  CHECK(IsFilledIndexByBufferIndex(buffer_index));  in MutableReadBuffer()
     55  return &buffer_[buffer_index];  in MutableReadBuffer()
    125  inline bool IsFilledIndexByBufferIndex(size_t buffer_index) const {  in IsFilledIndexByBufferIndex() argument
    126  return buffer_index < current_index_;  in IsFilledIndexByBufferIndex()
|
/external/libchrome/base/containers/
D | ring_buffer.h
     43  const size_t buffer_index = BufferIndex(n);  in ReadBuffer() local
     44  CHECK(IsFilledIndexByBufferIndex(buffer_index));  in ReadBuffer()
     45  return buffer_[buffer_index];  in ReadBuffer()
     49  const size_t buffer_index = BufferIndex(n);  in MutableReadBuffer() local
     50  CHECK(IsFilledIndexByBufferIndex(buffer_index));  in MutableReadBuffer()
     51  return &buffer_[buffer_index];  in MutableReadBuffer()
    121  inline bool IsFilledIndexByBufferIndex(size_t buffer_index) const {  in IsFilledIndexByBufferIndex() argument
    122  return buffer_index < current_index_;  in IsFilledIndexByBufferIndex()
|
/external/cronet/tot/base/containers/
D | ring_buffer.h
     47  const size_t buffer_index = BufferIndex(n);  in ReadBuffer() local
     48  CHECK(IsFilledIndexByBufferIndex(buffer_index));  in ReadBuffer()
     49  return buffer_[buffer_index];  in ReadBuffer()
     53  const size_t buffer_index = BufferIndex(n);  in MutableReadBuffer() local
     54  CHECK(IsFilledIndexByBufferIndex(buffer_index));  in MutableReadBuffer()
     55  return &buffer_[buffer_index];  in MutableReadBuffer()
    125  inline bool IsFilledIndexByBufferIndex(size_t buffer_index) const {  in IsFilledIndexByBufferIndex() argument
    126  return buffer_index < current_index_;  in IsFilledIndexByBufferIndex()
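The three ring_buffer.h entries above (libchrome plus the two Cronet snapshots) appear to be copies of the same Chromium base::RingBuffer: ReadBuffer() maps an absolute index to a physical slot with BufferIndex(n) and CHECKs that the slot has been filled before dereferencing it. A minimal sketch of that bounds-checked access pattern, with assert() standing in for CHECK() and the iteration helpers of the real class omitted:

    #include <cassert>
    #include <cstddef>

    // Sketch only; names follow the listing above but this is not Chromium's code.
    template <typename T, size_t kSize>
    class RingBufferSketch {
     public:
      // Append a value, overwriting the oldest entry once the buffer is full.
      void SaveToBuffer(const T& value) {
        buffer_[BufferIndex(current_index_)] = value;
        current_index_++;
      }

      // n is an absolute index; only the last kSize indices are still readable.
      const T& ReadBuffer(size_t n) const {
        const size_t buffer_index = BufferIndex(n);
        assert(IsFilledIndexByBufferIndex(buffer_index));  // CHECK() in Chromium
        return buffer_[buffer_index];
      }

      size_t CurrentIndex() const { return current_index_; }

     private:
      size_t BufferIndex(size_t n) const { return n % kSize; }

      // A physical slot is readable only if something has ever been stored in it.
      bool IsFilledIndexByBufferIndex(size_t buffer_index) const {
        return buffer_index < current_index_;
      }

      T buffer_[kSize] = {};
      size_t current_index_ = 0;
    };

For example, after three SaveToBuffer() calls on a buffer with kSize == 2, ReadBuffer(CurrentIndex() - 1) returns the newest element and CurrentIndex() - 2 the surviving older one, while a never-written slot trips the filled-index check.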
|
/external/XNNPACK/bench/
D | f16-spmm.cc
     71  for (size_t buffer_index = 0; buffer_index < num_buffers; buffer_index++) {  in f16_spmm() local
     95  nmap[buffer_index * nmap_elements + i] += 1;  in f16_spmm()
    112  nmap[buffer_index * nmap_elements + i] += 1;  in f16_spmm()
    121  a_offsets[buffer_index] = first_j * mc;  in f16_spmm()
    137  size_t buffer_index = 0;  in f16_spmm() local
    145  buffer_index = (buffer_index + 1) % num_buffers;  in f16_spmm()
    149  a.data() + a_offsets[buffer_index],  in f16_spmm()
    150  w.data() + buffer_index * w_elements,  in f16_spmm()
    151  dmap.data() + buffer_index * dmap_elements,  in f16_spmm()
    152  nmap.data() + buffer_index * nmap_elements,  in f16_spmm()
    [all …]
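The pattern in f16-spmm.cc recurs throughout the XNNPACK bench/ entries that follow: weights, inputs, and outputs are allocated num_buffers times over, and buffer_index is advanced modulo num_buffers between timed iterations so that successive microkernel calls are unlikely to read data still warm in cache. A self-contained sketch of that rotation; the kernel, the fixed copy count of 8, and the sizing are illustrative assumptions (the real benchmarks derive num_buffers from the detected cache size and call specific microkernels):

    #include <cstddef>
    #include <vector>

    // Stand-in for an XNNPACK microkernel; any routine that reads the selected
    // weight copy and writes the selected output copy illustrates the point.
    void run_kernel(const float* w, float* out, size_t elements) {
      for (size_t i = 0; i < elements; i++) {
        out[i] = w[i] * 2.0f;
      }
    }

    void benchmark_with_rotation(size_t elements, size_t iterations) {
      const size_t num_buffers = 8;  // hypothetical; sized so copies exceed cache
      std::vector<float> w(num_buffers * elements, 1.0f);
      std::vector<float> out(num_buffers * elements);

      size_t buffer_index = 0;
      for (size_t iter = 0; iter < iterations; iter++) {
        // Rotate to a different copy before every timed kernel invocation.
        buffer_index = (buffer_index + 1) % num_buffers;
        run_kernel(w.data() + buffer_index * elements,
                   out.data() + buffer_index * elements,
                   elements);
      }
    }

The softmax and vscale benchmarks below use the equivalent `if (++buffer_index == num_buffers) buffer_index = 0;` form of the same rotation.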
|
D | f32-softmax.cc
    116  size_t buffer_index = 0;  in DNNLSoftArgMax() local
    119  if (++buffer_index == num_buffers) {  in DNNLSoftArgMax()
    120  buffer_index = 0;  in DNNLSoftArgMax()
    210  size_t buffer_index = 0;  in ThreePassSoftMaxWithRecomputing() local
    213  if (++buffer_index == num_buffers) {  in ThreePassSoftMaxWithRecomputing()
    214  buffer_index = 0;  in ThreePassSoftMaxWithRecomputing()
    222  …vscaleexpminusmax(elements * sizeof(float), x.data(), y.data() + packed_elements * buffer_index, x…  in ThreePassSoftMaxWithRecomputing()
    279  size_t buffer_index = 0;  in ThreePassSoftMaxWithReloading() local
    282  if (++buffer_index == num_buffers) {  in ThreePassSoftMaxWithReloading()
    283  buffer_index = 0;  in ThreePassSoftMaxWithReloading()
    [all …]
|
D | f16-conv-hwc2chw.cc
     89  size_t buffer_index = 0;  in f16_conv_hwc2chw() local
     93  buffer_index = (buffer_index + 1) % num_buffers;  in f16_conv_hwc2chw()
    100  packed_weights.data() + buffer_index * weights_elements,  in f16_conv_hwc2chw()
    101  output.data() + buffer_index * output_elements,  in f16_conv_hwc2chw()
|
D | f32-conv-hwc2chw.cc
     87  size_t buffer_index = 0;  in f32_conv_hwc2chw() local
     91  buffer_index = (buffer_index + 1) % num_buffers;  in f32_conv_hwc2chw()
     98  packed_weights.data() + buffer_index * weights_elements,  in f32_conv_hwc2chw()
     99  output.data() + buffer_index * output_elements,  in f32_conv_hwc2chw()
|
D | f32-spmm.cc
     68  for (size_t buffer_index = 0; buffer_index < num_buffers; buffer_index++) {  in f32_spmm() local
     92  nmap[buffer_index * nmap_elements + i] += 1;  in f32_spmm()
    109  nmap[buffer_index * nmap_elements + i] += 1;  in f32_spmm()
    118  a_offsets[buffer_index] = first_j * mc;  in f32_spmm()
    134  size_t buffer_index = 0;  in f32_spmm() local
    142  buffer_index = (buffer_index + 1) % num_buffers;  in f32_spmm()
    146  a.data() + a_offsets[buffer_index],  in f32_spmm()
    147  w.data() + buffer_index * w_elements,  in f32_spmm()
    148  dmap.data() + buffer_index * dmap_elements,  in f32_spmm()
    149  nmap.data() + buffer_index * nmap_elements,  in f32_spmm()
    [all …]
|
D | f32-conv-hwc.cc
     86  size_t buffer_index = 0;  in f32_conv_hwc() local
     90  buffer_index = (buffer_index + 1) % num_buffers;  in f32_conv_hwc()
     97  packed_weights.data() + buffer_index * weights_elements,  in f32_conv_hwc()
     98  output.data() + buffer_index * output_elements,  in f32_conv_hwc()
|
D | f32-im2col-gemm.cc
     95  size_t buffer_index = 0;  in Im2ColGEMMBenchmark() local
     99  buffer_index = (buffer_index + 1) % num_buffers;  in Im2ColGEMMBenchmark()
    123  w.data() + (buffer_index * nc_stride + n) * (kernel_size * kc_stride + 1),  in Im2ColGEMMBenchmark()
    124  …c.data() + (buffer_index * output_size + m) * group_output_channels + n, group_output_channels * s…  in Im2ColGEMMBenchmark()
|
D | f32-gemm.cc
     82  size_t buffer_index = 0;  in GEMMBenchmark() local
     90  buffer_index = (buffer_index + 1) % num_buffers;  in GEMMBenchmark()
     98  w.data() + buffer_index * nc_stride * (kc_stride + 1),  in GEMMBenchmark()
     99  c.data() + (buffer_index * mc + m) * nc, nc * sizeof(float), nr * sizeof(float),  in GEMMBenchmark()
    159  size_t buffer_index = 0;  in PPMM1PBenchmark() local
    167  buffer_index = (buffer_index + 1) % num_buffers;  in PPMM1PBenchmark()
    176  w.data() + nc_stride * buffer_index * (kc + 1),  in PPMM1PBenchmark()
    177  c.data() + (mc * buffer_index + m) * nc, nc * sizeof(float), nr * sizeof(float),  in PPMM1PBenchmark()
    238  size_t buffer_index = 0;  in PPMM2PBenchmark() local
    246  buffer_index = (buffer_index + 1) % num_buffers;  in PPMM2PBenchmark()
    [all …]
|
D | f16-dwconv.cc
    132  size_t buffer_index = 0;  in f16_dwconv() local
    136  buffer_index = (buffer_index + 1) % num_buffers;  in f16_dwconv()
    141  reinterpret_cast<const void**>(i.data() + buffer_index * i_elements + step_height * y),  in f16_dwconv()
    142  w.data() + buffer_index * w_elements,  in f16_dwconv()
    143  c.data() + buffer_index * c_elements + y * output_width * channels,  in f16_dwconv()
|
D | f32-vscaleextexp.cc
     47  size_t buffer_index = 0;  in f32_vscaleextexp() local
     54  if (++buffer_index == num_buffers) {  in f32_vscaleextexp()
     55  buffer_index = 0;  in f32_vscaleextexp()
     59  …vscaleextexp(elements * sizeof(float), x.data(), y.data() + packed_n * buffer_index, ext_mantissa,…  in f32_vscaleextexp()
|
D | bf16-gemm.cc
     75  size_t buffer_index = 0;  in bf16_gemm() local
     83  buffer_index = (buffer_index + 1) % num_buffers;  in bf16_gemm()
     93  w.data() + (nc_stride * buffer_index + n) * (kc_stride + 1),  in bf16_gemm()
     94  c.data() + (mc * buffer_index + m) * nc + n, nc * sizeof(uint16_t), nr * sizeof(uint16_t),  in bf16_gemm()
|
D | f32-vscaleexpminusmax.cc
     49  size_t buffer_index = 0;  in f32_vscaleexpminusmax() local
     56  if (++buffer_index == num_buffers) {  in f32_vscaleexpminusmax()
     57  buffer_index = 0;  in f32_vscaleexpminusmax()
     61  …vscaleexpminusmax(elements * sizeof(float), x.data(), y.data() + packed_elements * buffer_index, x…  in f32_vscaleexpminusmax()
|
/external/webrtc/modules/desktop_capture/
D | screen_capturer_fuchsia.cc
     76  for (uint32_t buffer_index = 0;  in ~ScreenCapturerFuchsia() local
     77  buffer_index < buffer_collection_info_.buffer_count; buffer_index++) {  in ~ScreenCapturerFuchsia()
     79  reinterpret_cast<uintptr_t>(virtual_memory_mapped_addrs_[buffer_index]);  in ~ScreenCapturerFuchsia()
    153  uint32_t buffer_index = result.response().buffer_id();  in CaptureFrame() local
    163  frame->CopyPixelsFrom(virtual_memory_mapped_addrs_[buffer_index], stride,  in CaptureFrame()
    170  screen_capture_->ReleaseFrame(buffer_index, &release_result);  in CaptureFrame()
    390  for (uint32_t buffer_index = 0;  in SetupBuffers() local
    391  buffer_index < buffer_collection_info_.buffer_count; buffer_index++) {  in SetupBuffers()
    392  const zx::vmo& virt_mem = buffer_collection_info_.buffers[buffer_index].vmo;  in SetupBuffers()
    393  virtual_memory_mapped_addrs_[buffer_index] = nullptr;  in SetupBuffers()
    [all …]
|
/external/crosvm/gpu_display/src/
D | gpu_display_wl.rs
    109  buffer_index: Cell<usize>,  field
    128  let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;  in framebuffer() localVariable
    131  .get_slice(buffer_index * self.buffer_size, self.buffer_size)  in framebuffer()
    142  let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;  in next_buffer_in_use()
    155  self.buffer_index  in flip()
    156  .set((self.buffer_index.get() + 1) % BUFFER_COUNT);  in flip()
    161  dwl_surface_flip(self.surface(), self.buffer_index.get());  in flip()
    435  buffer_index: Cell::new(0),  in create_surface()
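In gpu_display_wl.rs the index is multi-buffering state: framebuffer() hands out the slot one ahead of buffer_index, next_buffer_in_use() peeks at that same slot, and flip() advances buffer_index modulo BUFFER_COUNT before presenting. A rough C++ sketch of that bookkeeping; the class, the fixed two-buffer count, and the in_use_ flags are assumptions for illustration, not crosvm's actual API:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the surface's buffer bookkeeping.
    class SurfaceSketch {
     public:
      static constexpr size_t kBufferCount = 2;

      explicit SurfaceSketch(size_t buffer_size)
          : buffer_size_(buffer_size), backing_(kBufferCount * buffer_size) {}

      // Memory the guest should draw the *next* frame into (one slot ahead).
      uint8_t* framebuffer() {
        const size_t buffer_index = (buffer_index_ + 1) % kBufferCount;
        return backing_.data() + buffer_index * buffer_size_;
      }

      // Would a flip land on a slot the compositor has not released yet?
      bool next_buffer_in_use() const {
        const size_t next_buffer_index = (buffer_index_ + 1) % kBufferCount;
        return in_use_[next_buffer_index];
      }

      // Advance to the freshly drawn buffer and present it.
      void flip() {
        buffer_index_ = (buffer_index_ + 1) % kBufferCount;
        in_use_[buffer_index_] = true;  // cleared when the compositor releases it
      }

     private:
      size_t buffer_size_;
      std::vector<uint8_t> backing_;
      size_t buffer_index_ = 0;
      std::array<bool, kBufferCount> in_use_{};
    };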
|
D | gpu_display_x.rs
    329  fn draw_buffer(&mut self, buffer_index: usize) {  in draw_buffer()
    330  let buffer = match self.buffers.get_mut(buffer_index) {  in draw_buffer()
    367  fn lazily_allocate_buffer(&mut self, buffer_index: usize) -> Option<&Buffer> {  in lazily_allocate_buffer()
    368  if buffer_index >= self.buffers.len() {  in lazily_allocate_buffer()
    372  if self.buffers[buffer_index].is_some() {  in lazily_allocate_buffer()
    373  return self.buffers[buffer_index].as_ref();  in lazily_allocate_buffer()
    414  self.buffers[buffer_index] = Some(Buffer {  in lazily_allocate_buffer()
    421  self.buffers[buffer_index].as_ref()  in lazily_allocate_buffer()
|
/external/executorch/runtime/executor/
D | program.cpp
    286  size_t buffer_index,  in get_constant_buffer_data() argument
    297  buffer_index < num_elems,  in get_constant_buffer_data()
    300  buffer_index,  in get_constant_buffer_data()
    309  (*internal_program->constant_segment()->offsets())[buffer_index]);  in get_constant_buffer_data()
    329  buffer_index < num_elems,  in get_constant_buffer_data()
    332  buffer_index,  in get_constant_buffer_data()
    338  constant_buffer[buffer_index]->storage()->size() <= nbytes,  in get_constant_buffer_data()
    341  constant_buffer[buffer_index]->storage()->size(),  in get_constant_buffer_data()
    345  constant_buffer[buffer_index]->storage()->data());  in get_constant_buffer_data()
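Unlike the CHECK-based accessors earlier in this listing, Program::get_constant_buffer_data() appears to treat a bad buffer_index as a recoverable error: the `buffer_index < num_elems` and `size() <= nbytes` conditions feed error-reporting checks before a pointer is returned. A simplified sketch of that validated lookup, using std::optional and a plain vector in place of ExecuTorch's result type and flatbuffer-backed tables (both substitutions are assumptions):

    #include <cstddef>
    #include <optional>
    #include <vector>

    struct ConstantBuffer {
      const void* data;
      size_t size;
    };

    // Return nullopt (instead of crashing) when the index is out of range or the
    // stored constant would not fit in the caller's destination of nbytes.
    std::optional<const void*> get_constant_buffer_data(
        const std::vector<ConstantBuffer>& constant_buffers,
        size_t buffer_index,
        size_t nbytes) {
      if (buffer_index >= constant_buffers.size()) {
        return std::nullopt;  // index past the number of constant buffers
      }
      if (constant_buffers[buffer_index].size > nbytes) {
        return std::nullopt;  // destination too small for this constant
      }
      return constant_buffers[buffer_index].data;
    }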
|
/external/tflite-support/tensorflow_lite_support/codegen/
D | metadata_helper.cc
     33  const auto buffer_index = model->metadata()->Get(i)->buffer();  in GetMetadataFromModel() local
     35  model->buffers()->size() <= buffer_index) {  in GetMetadataFromModel()
     38  const auto* buffer_vec = model->buffers()->Get(buffer_index)->data();  in GetMetadataFromModel()
|
/external/mesa3d/src/amd/vpelib/src/core/
D | color_gamma.c
    297  if (cal_buffer->buffer_index == 0) {  in translate_from_linear_space()
    305  if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START &&  in translate_from_linear_space()
    306  cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) ||  in translate_from_linear_space()
    307  (cal_buffer->buffer_index < 16))  in translate_from_linear_space()
    311  cal_buffer->gamma_of_2, cal_buffer->buffer[cal_buffer->buffer_index % 16]);  in translate_from_linear_space()
    313  if (cal_buffer->buffer_index != -1) {  in translate_from_linear_space()
    314  cal_buffer->buffer[cal_buffer->buffer_index % 16] = scratch_2;  in translate_from_linear_space()
    315  cal_buffer->buffer_index++;  in translate_from_linear_space()
    451  cal_buffer->buffer_index = 0; // see variable definition for more info  in build_regamma()
    464  cal_buffer->buffer_index = -1;  in build_regamma()
|
/external/tensorflow/tensorflow/lite/tools/
D | verifier_internal_test.cc
     64  int buffer_index = 0;  in AddTensor() local
     66  buffer_index = buffers_.size();  in AddTensor()
     71  buffer_index, name,  in AddTensor()
     75  tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,  in AddTensor()
|
/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/
D | ngrams.cc
    173  int buffer_index = 0;  in Eval() local
    175  output_row_splits[i] = buffer_index;  in Eval()
    183  ++buffer_index;  in Eval()
    186  output_row_splits[n_row_splits - 1] = buffer_index;  in Eval()
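In ngrams.cc, buffer_index is not a rotating slot at all: it is a running cursor over the flat output buffer, recorded into output_row_splits before each row is emitted and once more at the end, so the row-split array delimits each row's ngrams. A small sketch of that row-splits bookkeeping over a generic per-row emit step (the row/item types here are placeholders; the real kernel joins width-sized windows of string refs):

    #include <cstddef>
    #include <string>
    #include <vector>

    // Flatten per-row outputs into `values` while building ragged row_splits:
    // (*row_splits)[i] .. (*row_splits)[i + 1] delimits row i's items.
    void build_row_splits(const std::vector<std::vector<std::string>>& rows,
                          std::vector<std::string>* values,
                          std::vector<int>* row_splits) {
      int buffer_index = 0;
      row_splits->resize(rows.size() + 1);
      for (size_t i = 0; i < rows.size(); ++i) {
        (*row_splits)[i] = buffer_index;  // row i starts here in the flat buffer
        for (const std::string& item : rows[i]) {
          values->push_back(item);
          ++buffer_index;
        }
      }
      (*row_splits)[rows.size()] = buffer_index;  // total number of emitted values
    }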
|
/external/igt-gpu-tools/tools/
D | intel_perf_counters.c
    373  static const int buffer_index[GEN6_COUNTER_COUNT] =  in gen6_get_counters() local
    396  totals[i] += stats_result[buffer_index[i]] - last_counter[i];  in gen6_get_counters()
    397  last_counter[i] = stats_result[buffer_index[i]];  in gen6_get_counters()
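Here buffer_index is a static remap table rather than a loop variable: gen6_get_counters() reads a hardware statistics snapshot, and buffer_index[i] translates counter i into its slot in that snapshot (presumably because the snapshot layout differs from the tool's counter ordering) before accumulating the delta since the last read. A condensed sketch of that remap-and-accumulate step; the counter count and table contents are placeholders:

    #include <cstdint>

    // Placeholder count and remap table; the real table has GEN6_COUNTER_COUNT
    // entries laid out to match the hardware statistics buffer.
    constexpr int kCounterCount = 4;
    static const int buffer_index[kCounterCount] = {2, 0, 3, 1};

    static uint64_t last_counter[kCounterCount];
    static uint64_t totals[kCounterCount];

    // stats_result points at the freshly read hardware snapshot.
    void accumulate_counters(const uint64_t* stats_result) {
      for (int i = 0; i < kCounterCount; i++) {
        totals[i] += stats_result[buffer_index[i]] - last_counter[i];
        last_counter[i] = stats_result[buffer_index[i]];
      }
    }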
|