/external/sl4a/Common/src/com/googlecode/android_scripting/facade/wifi/ |
D | WifiP2pManagerFacade.java |
  144  WifiP2pDevice srcDevice) {  in onDnsSdServiceAvailable() argument
  148  msg.putString("SourceDeviceName", srcDevice.deviceName);  in onDnsSdServiceAvailable()
  149  msg.putString("SourceDeviceAddress", srcDevice.deviceAddress);  in onDnsSdServiceAvailable()
  165  Map<String, String> txtRecordMap, WifiP2pDevice srcDevice) {  in onDnsSdTxtRecordAvailable() argument
  173  msg.putString("SourceDeviceName", srcDevice.deviceName);  in onDnsSdTxtRecordAvailable()
  174  msg.putString("SourceDeviceAddress", srcDevice.deviceAddress);  in onDnsSdTxtRecordAvailable()
  261  WifiP2pDevice srcDevice) {  in onUpnpServiceAvailable() argument
  263  msg.putParcelable("Device", srcDevice);  in onUpnpServiceAvailable()
|
/external/pytorch/c10/cuda/ |
D | CUDACachingAllocator.h |
  281  int srcDevice,
  437  int srcDevice,  in memcpyAsync() argument
  442  dst, dstDevice, src, srcDevice, count, stream, p2p_enabled);  in memcpyAsync()
|
D | CUDAMallocAsyncAllocator.cpp |
  876  int srcDevice,  in memcpyAsync()
  880  if (p2p_enabled || dstDevice == srcDevice) {  in memcpyAsync()
  883  return cudaMemcpyPeerAsync(dst, dstDevice, src, srcDevice, count, stream);  in memcpyAsync()
|
D | CUDACachingAllocator.cpp |
  3583  int srcDevice,  in memcpyAsync() argument
  3588  srcDevice == dstDevice ||  // memcpy ok on a single device  in memcpyAsync()
  3591  !device_allocator[srcDevice]->hasAllocatedExpandableSegments())) {  in memcpyAsync()
  3596  return cudaMemcpyPeerAsync(dst, dstDevice, src, srcDevice, count, stream);  in memcpyAsync()
|
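Taken together, the PyTorch hits above show the same dispatch in both allocators: memcpyAsync forwards to cudaMemcpyPeerAsync only when the copy stays on a single device, or when peer access is enabled (and, in the caching allocator, neither side has allocated expandable segments). The sketch below is a minimal illustration of that guard, not PyTorch's implementation; the function name and the `expandable_segments` flag are stand-ins for the allocator-internal state named in the listing.

    // Minimal sketch of the guard suggested by the memcpyAsync hits above.
    // Not PyTorch's implementation: the function name and the
    // `expandable_segments` flag stand in for allocator-internal state.
    #include <cuda_runtime.h>

    cudaError_t memcpyAcrossDevices(void* dst, int dstDevice,
                                    const void* src, int srcDevice,
                                    size_t count, cudaStream_t stream,
                                    bool p2p_enabled, bool expandable_segments) {
      if (srcDevice == dstDevice ||                 // memcpy ok on a single device
          (p2p_enabled && !expandable_segments)) {  // direct peer copy is allowed
        return cudaMemcpyPeerAsync(dst, dstDevice, src, srcDevice, count, stream);
      }
      // Anything else would need a staged path; the listing does not show one.
      return cudaErrorNotSupported;
    }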
/external/tensorflow/tensorflow/compiler/xla/stream_executor/cuda/ |
D | cuda_9_0.inc |
  533  CUdeviceptr srcDevice, CUcontext srcContext,
  539  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount);
  550  CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
  555  return func_ptr(dstHost, srcDevice, ByteCount);
  558  CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
  563  return func_ptr(dstDevice, srcDevice, ByteCount);
  567  CUdeviceptr srcDevice, size_t ByteCount) {
  571  return func_ptr(dstArray, dstOffset, srcDevice, ByteCount);
  645  CUdeviceptr srcDevice, CUcontext srcContext,
  651  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount,
  [all …]
|
D | cuda_10_0.inc |
  543  CUdeviceptr srcDevice, CUcontext srcContext,
  549  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount);
  560  CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
  565  return func_ptr(dstHost, srcDevice, ByteCount);
  568  CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
  573  return func_ptr(dstDevice, srcDevice, ByteCount);
  577  CUdeviceptr srcDevice, size_t ByteCount) {
  581  return func_ptr(dstArray, dstOffset, srcDevice, ByteCount);
  655  CUdeviceptr srcDevice, CUcontext srcContext,
  661  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount,
  [all …]
|
D | cuda_10_1.inc |
  543  CUdeviceptr srcDevice, CUcontext srcContext,
  549  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount);
  560  CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
  565  return func_ptr(dstHost, srcDevice, ByteCount);
  568  CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
  573  return func_ptr(dstDevice, srcDevice, ByteCount);
  577  CUdeviceptr srcDevice, size_t ByteCount) {
  581  return func_ptr(dstArray, dstOffset, srcDevice, ByteCount);
  655  CUdeviceptr srcDevice, CUcontext srcContext,
  661  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount,
  [all …]
|
D | cuda_10_2.inc |
  551  CUdeviceptr srcDevice, CUcontext srcContext,
  557  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount);
  568  CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
  573  return func_ptr(dstHost, srcDevice, ByteCount);
  576  CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
  581  return func_ptr(dstDevice, srcDevice, ByteCount);
  585  CUdeviceptr srcDevice, size_t ByteCount) {
  589  return func_ptr(dstArray, dstOffset, srcDevice, ByteCount);
  663  CUdeviceptr srcDevice, CUcontext srcContext,
  669  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount,
  [all …]
|
D | cuda_11_2.inc |
  599  CUdeviceptr srcDevice, CUcontext srcContext,
  605  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount);
  616  CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
  621  return func_ptr(dstHost, srcDevice, ByteCount);
  624  CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
  629  return func_ptr(dstDevice, srcDevice, ByteCount);
  633  CUdeviceptr srcDevice, size_t ByteCount) {
  637  return func_ptr(dstArray, dstOffset, srcDevice, ByteCount);
  711  CUdeviceptr srcDevice, CUcontext srcContext,
  717  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount,
  [all …]
|
D | cuda_11_0.inc |
  604  CUdeviceptr srcDevice, CUcontext srcContext,
  610  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount);
  621  CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
  626  return func_ptr(dstHost, srcDevice, ByteCount);
  629  CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
  634  return func_ptr(dstDevice, srcDevice, ByteCount);
  638  CUdeviceptr srcDevice, size_t ByteCount) {
  642  return func_ptr(dstArray, dstOffset, srcDevice, ByteCount);
  716  CUdeviceptr srcDevice, CUcontext srcContext,
  722  return func_ptr(dstDevice, dstContext, srcDevice, srcContext, ByteCount,
  [all …]
|
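Every cuda_*.inc hit above has the same shape: a thin stub with a driver-API signature (cuMemcpyDtoH and cuMemcpyDtoD are named explicitly; the other argument lists look like cuMemcpyPeer, cuMemcpyDtoA, and cuMemcpyPeerAsync) that forwards its arguments to a func_ptr resolved from the real CUDA driver. Below is a rough sketch of that forwarding pattern only, under stated assumptions: the LoadDriverSymbol helper and the wrapper name are hypothetical, and the versioned export name "cuMemcpyDtoD_v2" is an assumption about how libcuda exposes the symbol.

    // Rough sketch of the forwarding-stub pattern these .inc files appear to use:
    // resolve the real driver entry point once, then pass arguments through
    // unchanged. The loader helper and wrapper name are hypothetical, and the
    // versioned export name "cuMemcpyDtoD_v2" is an assumption about libcuda.
    #include <dlfcn.h>
    #include <cuda.h>

    template <typename FuncPtr>
    static FuncPtr LoadDriverSymbol(const char* name) {
      static void* handle = dlopen("libcuda.so.1", RTLD_LAZY);
      return handle ? reinterpret_cast<FuncPtr>(dlsym(handle, name)) : nullptr;
    }

    static CUresult WrapCuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice,
                                     size_t ByteCount) {
      using FuncPtr = CUresult (*)(CUdeviceptr, CUdeviceptr, size_t);
      static FuncPtr func_ptr = LoadDriverSymbol<FuncPtr>("cuMemcpyDtoD_v2");
      if (!func_ptr) return CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND;
      return func_ptr(dstDevice, srcDevice, ByteCount);  // forward unchanged
    }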
D | cuda_runtime_9_0.inc |
  238  int srcDevice, int dstDevice) {
  243  return func_ptr(value, attr, srcDevice, dstDevice);
  790  int srcDevice,
  796  return func_ptr(dst, dstDevice, src, srcDevice, count);
  910  cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice,
  916  return func_ptr(dst, dstDevice, src, srcDevice, count, stream);
|
D | cuda_runtime_10_1.inc |
  239  int srcDevice, int dstDevice) {
  244  return func_ptr(value, attr, srcDevice, dstDevice);
  910  int srcDevice,
  916  return func_ptr(dst, dstDevice, src, srcDevice, count);
  997  cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice,
  1003  return func_ptr(dst, dstDevice, src, srcDevice, count, stream);
|
D | cuda_runtime_10_0.inc |
  239  int srcDevice, int dstDevice) {
  244  return func_ptr(value, attr, srcDevice, dstDevice);
  915  int srcDevice,
  921  return func_ptr(dst, dstDevice, src, srcDevice, count);
  1035  cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice,
  1041  return func_ptr(dst, dstDevice, src, srcDevice, count, stream);
|
D | cuda_runtime_10_2.inc |
  248  int srcDevice, int dstDevice) {
  253  return func_ptr(value, attr, srcDevice, dstDevice);
  919  int srcDevice,
  925  return func_ptr(dst, dstDevice, src, srcDevice, count);
  1006  cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice,
  1012  return func_ptr(dst, dstDevice, src, srcDevice, count, stream);
|
D | cuda_runtime_11_2.inc |
  271  int srcDevice, int dstDevice) {
  276  return func_ptr(value, attr, srcDevice, dstDevice);
  1022  int srcDevice,
  1028  return func_ptr(dst, dstDevice, src, srcDevice, count);
  1109  cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice,
  1115  return func_ptr(dst, dstDevice, src, srcDevice, count, stream);
|
D | cuda_runtime_11_0.inc |
  304  int srcDevice, int dstDevice) {
  309  return func_ptr(value, attr, srcDevice, dstDevice);
  1090  int srcDevice,
  1096  return func_ptr(dst, dstDevice, src, srcDevice, count);
  1177  cudaMemcpyPeerAsync(void *dst, int dstDevice, const void *src, int srcDevice,
  1183  return func_ptr(dst, dstDevice, src, srcDevice, count, stream);
|
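The cuda_runtime_*.inc hits are the same wrapper pattern applied to the runtime API: argument lists that match cudaDeviceGetP2PAttribute (value, attr, srcDevice, dstDevice) and cudaMemcpyPeer, plus cudaMemcpyPeerAsync, which is named explicitly. For context, here is a small caller-side sketch (not taken from any of the files above) of how those srcDevice/dstDevice parameters fit together; it assumes a machine with at least two CUDA devices and omits error checking for brevity.

    // Caller-side sketch: query P2P support between two devices, then issue an
    // asynchronous peer copy. Assumes at least two CUDA devices; error checking
    // is omitted for brevity.
    #include <cuda_runtime.h>
    #include <cstdio>

    int main() {
      const int srcDevice = 0, dstDevice = 1;
      const size_t count = 1 << 20;  // 1 MiB

      int p2pSupported = 0;
      cudaDeviceGetP2PAttribute(&p2pSupported, cudaDevP2PAttrAccessSupported,
                                srcDevice, dstDevice);

      void* src = nullptr;
      void* dst = nullptr;
      cudaSetDevice(srcDevice);
      cudaMalloc(&src, count);
      cudaSetDevice(dstDevice);
      cudaMalloc(&dst, count);
      if (p2pSupported) cudaDeviceEnablePeerAccess(srcDevice, 0);  // from dstDevice

      cudaStream_t stream;
      cudaStreamCreate(&stream);
      // Falls back to a staged copy through the host if P2P is unavailable.
      cudaMemcpyPeerAsync(dst, dstDevice, src, srcDevice, count, stream);
      cudaStreamSynchronize(stream);
      std::printf("peer copy issued: %s\n", cudaGetErrorString(cudaGetLastError()));

      cudaFree(dst);
      cudaSetDevice(srcDevice);
      cudaFree(src);
      cudaStreamDestroy(stream);
      return 0;
    }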
/external/pytorch/torch/csrc/cuda/ |
D | CUDAPluggableAllocator.h | 157 int srcDevice,
|
D | CUDAPluggableAllocator.cpp | 348 int srcDevice, in memcpyAsync() argument
|
/external/tensorflow/tensorflow/core/profiler/backends/gpu/ |
D | rocm_tracer.cc | 674 event.device_id = data->args.hipMemcpyPeerAsync.srcDevice; in AddMemcpyPeerEventUponApiExit()
|
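The ROCm profiler hit mirrors the peer-copy story on the HIP side: the tracer tags the event's device_id with the srcDevice recorded in the hipMemcpyPeerAsync callback arguments. Below is a minimal sketch of the API being traced, not of rocm_tracer itself; hipMemcpyPeerAsync takes the same (dst, dstDevice, src, srcDevice, count, stream) argument order as its CUDA counterpart.

    // Minimal sketch of the HIP call whose callback arguments the tracer reads;
    // not rocm_tracer code. Assumes two HIP devices; error checking omitted.
    #include <hip/hip_runtime.h>

    void peerCopy(void* dst, int dstDevice, const void* src, int srcDevice,
                  size_t count, hipStream_t stream) {
      // In the roctracer callback, rocm_tracer reads srcDevice back out of
      // data->args.hipMemcpyPeerAsync.srcDevice to tag the event's device_id.
      hipMemcpyPeerAsync(dst, dstDevice, src, srcDevice, count, stream);
    }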