/external/tensorflow/tensorflow/compiler/xla/service/cpu/
    cpu_executable.cc
        187  void* result_buffer = buffer_pointers[result_slice.index()];  in ExecuteComputeFunction() local
        194  VLOG(3) << absl::StrFormat(" result = %p", result_buffer);  in ExecuteComputeFunction()
        205  compute_function_(result_buffer, run_options, nullptr, buffer_pointers.data(),  in ExecuteComputeFunction()
        243  se::DeviceMemoryBase& result_buffer = p.second;  in CreateResultShapedBuffer() local
        280  result_buffer = argument_buffer;  in CreateResultShapedBuffer()
        299  result_buffer = allocated_buffer.Release();  in CreateResultShapedBuffer()
        301  CHECK_EQ(result_buffer.size(),  in CreateResultShapedBuffer()
        303  std::memcpy(/*dest=*/result_buffer.opaque(),  in CreateResultShapedBuffer()
        305  /*n=*/result_buffer.size());  in CreateResultShapedBuffer()
        306  registered_buffer = result_buffer;  in CreateResultShapedBuffer()
        [all …]
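The cpu_executable.cc matches show CreateResultShapedBuffer choosing between two paths for each output slot: reuse a donated argument buffer, or allocate fresh memory and memcpy the existing contents across after a size check. A standalone sketch of that decision, using plain containers rather than XLA's allocator and se::DeviceMemoryBase types:

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

// Stand-in for a device allocation handle; the real code uses se::DeviceMemoryBase.
struct Buffer {
  void* opaque = nullptr;
  size_t size = 0;
};

// Either reuse the donated argument buffer as the result, or allocate fresh
// storage and copy the argument's contents into it after a size check.
Buffer PrepareResultBuffer(const Buffer& argument_buffer, bool can_reuse_argument,
                           std::vector<std::vector<char>>& owned_allocations) {
  if (can_reuse_argument) {
    return argument_buffer;  // the result aliases the input; no copy needed
  }
  owned_allocations.emplace_back(argument_buffer.size);
  Buffer result_buffer{owned_allocations.back().data(), argument_buffer.size};
  assert(result_buffer.size == argument_buffer.size);  // CHECK_EQ in the real code
  std::memcpy(/*dest=*/result_buffer.opaque,
              /*src=*/argument_buffer.opaque,
              /*n=*/result_buffer.size);
  return result_buffer;
}
```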
/external/mesa3d/src/gallium/drivers/iris/
    iris_monitor.c
        36   unsigned char *result_buffer;  member
        213  monitor->result_buffer = calloc(1, monitor->result_size);  in iris_create_monitor_object()
        214  if (unlikely(!monitor->result_buffer))  in iris_create_monitor_object()
        222  free(monitor->result_buffer);  in iris_create_monitor_object()
        236  free(monitor->result_buffer);  in iris_destroy_monitor_object()
        237  monitor->result_buffer = NULL;  in iris_destroy_monitor_object()
        288  (unsigned*) monitor->result_buffer,  in iris_get_monitor_result()
        303  result[i].u64 = *(uint64_t*)(monitor->result_buffer + counter->offset);  in iris_get_monitor_result()
        306  result[i].f = *(float*)(monitor->result_buffer + counter->offset);  in iris_get_monitor_result()
        310  result[i].u64 = *(uint32_t*)(monitor->result_buffer + counter->offset);  in iris_get_monitor_result()
        [all …]
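The iris_monitor.c hits show the driver keeping one calloc'd byte buffer for a whole performance query and then reinterpreting each counter at its recorded offset as a uint64, float, or uint32. A minimal sketch of that decode step with a hypothetical counter table (memcpy is used here instead of the driver's pointer casts, to stay portable):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

enum class CounterType { kUint64, kUint32, kFloat };

struct CounterDesc {
  CounterType type;
  size_t offset;  // byte offset of this counter inside result_buffer
};

union CounterValue {
  uint64_t u64;
  float f;
};

// Decode each counter out of the raw result buffer at its recorded offset.
std::vector<CounterValue> DecodeCounters(const std::vector<unsigned char>& result_buffer,
                                         const std::vector<CounterDesc>& counters) {
  std::vector<CounterValue> result(counters.size());
  for (size_t i = 0; i < counters.size(); ++i) {
    const unsigned char* src = result_buffer.data() + counters[i].offset;
    switch (counters[i].type) {
      case CounterType::kUint64:
        std::memcpy(&result[i].u64, src, sizeof(uint64_t));
        break;
      case CounterType::kFloat:
        std::memcpy(&result[i].f, src, sizeof(float));
        break;
      case CounterType::kUint32: {
        uint32_t v;
        std::memcpy(&v, src, sizeof(v));
        result[i].u64 = v;  // widen 32-bit counters, as the driver does
        break;
      }
    }
  }
  return result;
}
```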
/external/python/cpython2/Lib/test/
    symlink_support.py
        62   result_buffer = ctypes.wintypes.LPWSTR()
        70   ctypes.byref(result_buffer),
        78   message = result_buffer.value
        79   ctypes.windll.kernel32.LocalFree(result_buffer)
/external/tensorflow/tensorflow/core/lib/io/
    random_inputstream.cc
        39   char* result_buffer = &(*result)[0];  in ReadNBytes() local
        41   Status s = file_->Read(pos_, bytes_to_read, &data, result_buffer);  in ReadNBytes()
        42   if (data.data() != result_buffer) {  in ReadNBytes()
        43   memmove(result_buffer, data.data(), data.size());  in ReadNBytes()
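random_inputstream.cc reads straight into the caller's string and only copies when the underlying file returned a pointer into its own cache rather than filling the scratch buffer. A self-contained sketch of that contract, with a toy ReadAt standing in for the file's Read method:

```cpp
#include <cstdint>
#include <cstring>
#include <string>
#include <string_view>

// Toy backend: it may ignore the caller's scratch buffer and return a view
// into its own storage instead, which is the case the real code handles.
static const std::string kFile = "0123456789abcdef";
std::string_view ReadAt(uint64_t pos, size_t n, char* /*scratch*/) {
  if (pos >= kFile.size()) return {};
  return std::string_view(kFile).substr(pos, n);
}

// Read n bytes at `pos` into *result, handling both behaviours of ReadAt:
// data already in the caller's buffer, or data living in the backend's cache.
void ReadNBytes(uint64_t pos, size_t n, std::string* result) {
  result->resize(n);
  char* result_buffer = result->empty() ? nullptr : &(*result)[0];
  std::string_view data = ReadAt(pos, n, result_buffer);
  if (!data.empty() && data.data() != result_buffer) {
    std::memmove(result_buffer, data.data(), data.size());
  }
  result->resize(data.size());  // shrink on a short read
}
```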
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
    gpu_conv_algorithm_picker.cc
        127  se::DeviceMemoryBase result_buffer, se::StreamExecutor* stream_exec,  in GetMIOpenAlgorithms() argument
        140  GetGpuConvParams(config, operand_buffers, result_buffer));  in GetMIOpenAlgorithms()
        413  TF_ASSIGN_OR_RETURN(auto result_buffer,  in PickBestAlgorithmNoCacheCuda()
        416  initialize_buffer(result_buffer, result_shape);  in PickBestAlgorithmNoCacheCuda()
        474  RunGpuConv(config, absl::MakeSpan(operand_buffers), result_buffer,  in PickBestAlgorithmNoCacheCuda()
        533  stream, reference_result_buffer, result_buffer);  in PickBestAlgorithmNoCacheCuda()
        557  reinterpret_cast<uint64>(result_buffer.opaque()));  in PickBestAlgorithmNoCacheCuda()
        568  input_output_allocator.AllocateBytes(result_buffer.size()));  in PickBestAlgorithmNoCacheCuda()
        569  stream->ThenMemcpy(&reference_result_buffer, result_buffer,  in PickBestAlgorithmNoCacheCuda()
        570  result_buffer.size());  in PickBestAlgorithmNoCacheCuda()
        [all …]
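The gpu_conv_algorithm_picker.cc matches outline the autotuning loop: each candidate algorithm writes into the same result buffer, the first output is copied aside as a reference (the ThenMemcpy into reference_result_buffer), and later outputs are checked against it. A host-side sketch of that keep-a-reference-and-compare pattern, with hypothetical Candidate callables in place of real conv launches:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// Hypothetical candidate: a name plus a callable that fills the shared result
// buffer (the real code launches a conv with a specific algorithm on a stream).
struct Candidate {
  std::string name;
  std::function<void(std::vector<float>&)> run;
};

// Run every candidate into the same result buffer, snapshot the first output
// as the reference, and flag any later candidate that disagrees with it.
void CheckCandidates(const std::vector<Candidate>& candidates, size_t result_size) {
  std::vector<float> result_buffer(result_size);
  std::vector<float> reference_result_buffer;
  for (const Candidate& candidate : candidates) {
    std::fill(result_buffer.begin(), result_buffer.end(), 0.0f);
    candidate.run(result_buffer);
    if (reference_result_buffer.empty()) {
      reference_result_buffer = result_buffer;  // copied aside, like ThenMemcpy
      continue;
    }
    for (size_t i = 0; i < result_size; ++i) {
      if (std::fabs(result_buffer[i] - reference_result_buffer[i]) > 1e-4f) {
        std::printf("%s disagrees with the reference at element %zu\n",
                    candidate.name.c_str(), i);
        break;
      }
    }
  }
}
```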
    gpu_conv_runner.h
        112  se::DeviceMemoryBase result_buffer,
        118  se::DeviceMemoryBase result_buffer,
        151  se::DeviceMemoryBase result_buffer);
    gpu_executable.cc
        465  se::DeviceMemoryBase& result_buffer = p.second;  in ExecuteAsyncOnStreamImpl() local
        502  result_buffer = argument_buffer;  in ExecuteAsyncOnStreamImpl()
        523  result_buffer = allocated_buffer.Release();  in ExecuteAsyncOnStreamImpl()
        527  CHECK_EQ(aliased_buffer.size(), result_buffer.size());  in ExecuteAsyncOnStreamImpl()
        528  run_options->stream()->ThenMemcpyD2D(&result_buffer, aliased_buffer,  in ExecuteAsyncOnStreamImpl()
        530  aliased_buffer = result_buffer;  in ExecuteAsyncOnStreamImpl()
        534  if (result_buffer.is_null()) {  in ExecuteAsyncOnStreamImpl()
        537  result_buffer =  in ExecuteAsyncOnStreamImpl()
        546  buffers_in_result.insert(result_buffer);  in ExecuteAsyncOnStreamImpl()
    convolution_thunk.cc
        51   se::DeviceMemoryBase result_buffer =  in ExecuteOnStream() local
        60   result_buffer, scratch, params.stream));  in ExecuteOnStream()
    gpu_conv_runner.cc
        460  se::DeviceMemoryBase result_buffer) {  in GetGpuConvParams() argument
        469  params.output_buf = result_buffer;  in GetGpuConvParams()
        472  params.input_buf = result_buffer;  in GetGpuConvParams()
        478  params.filter_buf = result_buffer;  in GetGpuConvParams()
        497  se::DeviceMemoryBase result_buffer,  in RunGpuConv() argument
        501  return RunGpuConv(config, operand_buffers, result_buffer, &scratch_allocator,  in RunGpuConv()
        507  se::DeviceMemoryBase result_buffer,  in RunGpuConv() argument
        511  GetGpuConvParams(config, operand_buffers, result_buffer));  in RunGpuConv()
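gpu_conv_runner.cc routes the result buffer into a different slot of the conv parameters depending on the convolution kind, which is what the scattered params.output_buf / params.input_buf / params.filter_buf assignments above are doing: a forward conv produces the output, a backward-input conv produces the input gradient, and a backward-filter conv produces the filter gradient. A compact sketch of that dispatch with simplified, hypothetical types:

```cpp
#include <cstddef>

enum class ConvKind { kForward, kBackwardInput, kBackwardFilter };

struct DeviceMem { void* ptr = nullptr; size_t size = 0; };

struct ConvParams {
  DeviceMem input_buf;   // activations (or their gradient)
  DeviceMem filter_buf;  // weights (or their gradient)
  DeviceMem output_buf;  // forward output
};

// The buffer the conv produces lands in a different parameter slot depending
// on which kind of convolution is being run.
void AssignResultBuffer(ConvKind kind, DeviceMem result_buffer, ConvParams& params) {
  switch (kind) {
    case ConvKind::kForward:
      params.output_buf = result_buffer;
      break;
    case ConvKind::kBackwardInput:
      params.input_buf = result_buffer;
      break;
    case ConvKind::kBackwardFilter:
      params.filter_buf = result_buffer;
      break;
  }
}
```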
/external/tensorflow/tensorflow/stream_executor/tpu/
    tpu_executable_interface.cc
        100  se::DeviceMemoryBase& result_buffer = pair.second;  in AllocateOutputMemoryWithInputReuse() local
        126  result_buffer = device_memory_base;  in AllocateOutputMemoryWithInputReuse()
        143  if (result_buffer.is_null()) {  in AllocateOutputMemoryWithInputReuse()
        154  result_buffer = allocated_buffer.Release();  in AllocateOutputMemoryWithInputReuse()
        156  TF_RET_CHECK(allocation_bytes == 0 || result_buffer != nullptr);  in AllocateOutputMemoryWithInputReuse()
/external/python/pyopenssl/src/OpenSSL/
    crypto.py
        142  result_buffer = _ffi.new("char**")
        143  buffer_length = _lib.BIO_get_mem_data(bio, result_buffer)
        144  return _ffi.buffer(result_buffer[0], buffer_length)[:]
        621  result_buffer = _ffi.new("unsigned char**")
        622  data_length = _lib.ASN1_STRING_to_UTF8(result_buffer, data)
        626  result = _ffi.buffer(result_buffer[0], data_length)[:].decode(
        631  _lib.OPENSSL_free(result_buffer[0])
        656  result_buffer = _ffi.new("char[]", 512)
        658  self._name, result_buffer, len(result_buffer)
        663  _native(_ffi.string(result_buffer)),
        [all …]
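The crypto.py hits follow a common cffi pattern: OpenSSL fills a pointer (or pointer-plus-length) out-parameter, the Python side copies the bytes out with _ffi.buffer(...)[:], and, where OpenSSL allocated the memory, OPENSSL_free releases it. The same ownership dance in C++ against a hypothetical C API (not an OpenSSL call):

```cpp
#include <cstdlib>
#include <cstring>
#include <string>

// Hypothetical C-style API in the spirit of ASN1_STRING_to_UTF8: it allocates
// a buffer, hands it back through an out-parameter, and returns its length.
extern "C" long get_utf8_name(char** result_buffer) {
  const char* name = "example.org";
  const size_t len = std::strlen(name);
  *result_buffer = static_cast<char*>(std::malloc(len));
  std::memcpy(*result_buffer, name, len);
  return static_cast<long>(len);
}

// Copy the library-owned bytes into an owned std::string, then release the
// original with the matching free (OPENSSL_free in the pyOpenSSL code).
std::string ReadName() {
  char* result_buffer = nullptr;
  const long length = get_utf8_name(&result_buffer);
  std::string result(result_buffer, static_cast<size_t>(length));
  std::free(result_buffer);
  return result;
}
```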
/external/tensorflow/tensorflow/compiler/xla/pjrt/
    pjrt_stream_executor_client.cc
        1551  ScopedShapedBuffer* result_buffer,  in OutputBufferHelper() argument
        1555  TrackedDeviceBuffer::FromScopedShapedBuffer(result_buffer,  in OutputBufferHelper()
        1558  result_buffer->on_device_shape(), std::move(out_buffer), client, device);  in OutputBufferHelper()
        1824  ScopedShapedBuffer result_buffer,  in MakeOutputBuffers() argument
        1829  if (options.untuple_result && result_buffer.on_device_shape().IsTuple()) {  in MakeOutputBuffers()
        1830  int tuple_count = result_buffer.on_device_shape().tuple_shapes_size();  in MakeOutputBuffers()
        1835  ScopedShapedBuffer tuple_buffer = result_buffer.TakeSubTree({i});  in MakeOutputBuffers()
        1841  ShapedBuffer root_buffer_holder = result_buffer.release();  in MakeOutputBuffers()
        1850  outputs.push_back(OutputBufferHelper(&result_buffer, definition_event,  in MakeOutputBuffers()
        1898  ScopedShapedBuffer result_buffer =  in ExecuteHelper() local
        [all …]
    pjrt_stream_executor_client.h
        697  ScopedShapedBuffer result_buffer,
/external/tensorflow/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/
    hlo_legalize_to_lhlo.cc
        262  Value result_buffer = InsertDynamicAllocAndDealloc(  in matchAndRewrite() local
        265  rewriter.create<lmhlo::CopyOp>(loc, result, result_buffer);  in matchAndRewrite()
        266  result = result_buffer;  in matchAndRewrite()
/external/libaom/libaom/av1/encoder/
    temporal_filter.c
        864  const uint16_t *count, YV12_BUFFER_CONFIG *result_buffer) {  in tf_normalize_filtered_frame() argument
        871  const int is_high_bitdepth = is_frame_high_bitdepth(result_buffer);  in tf_normalize_filtered_frame()
        877  const int frame_stride = result_buffer->strides[plane == 0 ? 0 : 1];  in tf_normalize_filtered_frame()
        879  uint8_t *const buf = result_buffer->buffers[plane];  in tf_normalize_filtered_frame()
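tf_normalize_filtered_frame normalizes the accumulated, filtered pixels by their per-pixel counts to produce the final frame, walking each plane of the YV12 buffer with its own stride. A sketch of that normalization for a single low-bit-depth plane (the rounding and clamping here are illustrative assumptions, not libaom's exact arithmetic):

```cpp
#include <cstdint>
#include <vector>

// Turn accumulated pixel sums and per-pixel counts into an 8-bit output plane.
// `accum`/`count` are laid out densely (width * height); the output plane uses
// `frame_stride`, mirroring YV12_BUFFER_CONFIG's per-plane strides.
void NormalizeFilteredPlane(const std::vector<uint32_t>& accum,
                            const std::vector<uint16_t>& count,
                            int width, int height, int frame_stride,
                            std::vector<uint8_t>& result_buffer) {
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      const int idx = y * width + x;
      const uint16_t c = count[idx] ? count[idx] : 1;    // avoid divide-by-zero
      const uint32_t value = (accum[idx] + c / 2) / c;   // rounded average
      result_buffer[y * frame_stride + x] =
          static_cast<uint8_t>(value > 255 ? 255 : value);
    }
  }
}
```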
/external/tensorflow/tensorflow/compiler/xla/service/
    service.cc
        717  TF_ASSIGN_OR_RETURN(const ShapedBuffer* result_buffer,  in ExecuteGraphParallel()
        721  TF_RETURN_IF_ERROR(RecordResult(*result_buffer, stream.get(),  in ExecuteGraphParallel()
        889  const ShapedBuffer* result_buffer,  in Execute()
        891  TF_RETURN_IF_ERROR(RecordResult(*result_buffer, stream.get(),  in Execute()
/external/tensorflow/tensorflow/core/platform/cloud/
    gcs_file_system.cc
        1558  std::vector<char> result_buffer;  in GetBucketLocation() local
        1559  Status status = GetBucketMetadata(bucket, &result_buffer);  in GetBucketLocation()
        1561  TF_RETURN_IF_ERROR(ParseJson(result_buffer, &result));  in GetBucketLocation()
        1577  std::vector<char>* result_buffer) {  in GetBucketMetadata() argument
        1582  if (result_buffer != nullptr) {  in GetBucketMetadata()
        1583  request->SetResultBuffer(result_buffer);  in GetBucketMetadata()
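The gcs_file_system.cc matches show an optional out-buffer convention: GetBucketMetadata only attaches result_buffer to the HTTP request when the caller passed one, and GetBucketLocation then parses JSON out of that buffer. A skeleton of that plumbing with hypothetical HttpRequest and parsing stand-ins (not the real GCS client types):

```cpp
#include <string>
#include <vector>

// Hypothetical stand-in for the HTTP client; it only illustrates the plumbing
// around result_buffer, not real networking.
struct HttpRequest {
  std::vector<char>* result = nullptr;
  void SetResultBuffer(std::vector<char>* buf) { result = buf; }
  bool Send() {
    const std::string body = R"({"location":"US-CENTRAL1"})";
    if (result != nullptr) result->assign(body.begin(), body.end());
    return true;
  }
};

// Only wire a result buffer into the request when the caller wants the body.
bool GetBucketMetadata(const std::string& /*bucket*/, std::vector<char>* result_buffer) {
  HttpRequest request;
  if (result_buffer != nullptr) {
    request.SetResultBuffer(result_buffer);
  }
  return request.Send();
}

bool GetBucketLocation(const std::string& bucket, std::string* location) {
  std::vector<char> result_buffer;
  if (!GetBucketMetadata(bucket, &result_buffer)) return false;
  // The real code runs a JSON parser over the buffer; here we just hand the
  // raw body back.
  location->assign(result_buffer.begin(), result_buffer.end());
  return true;
}
```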
    gcs_file_system.h
        360  std::vector<char>* result_buffer);
/external/tensorflow/tensorflow/compiler/xla/tools/
    driver.cc
        58   extern void EntryModule(char* result_buffer, char* run_opts, char** params,
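driver.cc calls code emitted by XLA's CPU backend through a plain C symbol that receives the result buffer and the parameter pointers directly, much like the compute_function_ call in cpu_executable.cc above. A generic sketch of driving such an entry point through a function pointer, with a deliberately simplified, hypothetical signature (the real EntryModule takes additional arguments that are truncated in the listing):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical reduced entry-point signature: result buffer first, then the
// array of parameter pointers. The caller owns every buffer it passes in.
using EntryFn = void (*)(float* result_buffer, float** params);

// Toy "compiled module": adds two parameter vectors into the result buffer.
void FakeEntryModule(float* result_buffer, float** params) {
  for (int i = 0; i < 4; ++i) result_buffer[i] = params[0][i] + params[1][i];
}

int main() {
  std::vector<float> a = {1, 2, 3, 4}, b = {10, 20, 30, 40}, result(4);
  float* params[] = {a.data(), b.data()};
  const EntryFn entry = &FakeEntryModule;
  entry(result.data(), params);
  for (float v : result) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}
```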
/external/tensorflow/tensorflow/compiler/xla/service/spmd/
    dot_handler.cc
        631   auto result_buffer = CreateZero(padded_result_buffer_shape, b);  in PartitionBaseCase() local
        1129  result_buffer->shape(),  in PartitionBaseCase()
        1137  result_buffer->shape(), param, 2));  in PartitionBaseCase()
        1405  result_buffer->shape(),  in PartitionBaseCase()
        1423  {lhs.hlo(), rhs.hlo(), result_buffer, extra_buffer, iteration}))));  in PartitionBaseCase()
        1428  result_buffer->shape(), while_loop, 2));  in PartitionBaseCase()
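dot_handler.cc starts the partitioned dot from a zero buffer of the padded result shape (CreateZero) and threads result_buffer through a while loop together with lhs, rhs, an extra buffer, and the iteration counter; in that windowed scheme each iteration adds one partition's partial product into the buffer. The same accumulation written as ordinary host code over K-shards (a toy matmul, not SPMD HLO construction):

```cpp
#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<float>>;

// Accumulate one K-shard's partial matmul per iteration into a result buffer
// that starts out as zeros, the way the partitioned dot adds one partition's
// contribution to result_buffer on each while-loop iteration.
Matrix WindowedDot(const std::vector<Matrix>& lhs_shards,   // each M x Kshard
                   const std::vector<Matrix>& rhs_shards,   // each Kshard x N
                   size_t m, size_t n) {
  Matrix result_buffer(m, std::vector<float>(n, 0.0f));     // CreateZero(...)
  for (size_t iteration = 0; iteration < lhs_shards.size(); ++iteration) {
    const Matrix& lhs = lhs_shards[iteration];
    const Matrix& rhs = rhs_shards[iteration];
    for (size_t i = 0; i < m; ++i)
      for (size_t k = 0; k < lhs[i].size(); ++k)
        for (size_t j = 0; j < n; ++j)
          result_buffer[i][j] += lhs[i][k] * rhs[k][j];
  }
  return result_buffer;
}
```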