/external/eigen/unsupported/test/
cxx11_tensor_gpu.cu
  36   gpuMemcpy(d_in1, in1.data(), tensor_bytes, gpuMemcpyHostToDevice);   in test_gpu_nullary()
  37   gpuMemcpy(d_in2, in2.data(), tensor_bytes, gpuMemcpyHostToDevice);   in test_gpu_nullary()
  87   gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);   in test_gpu_elementwise_small()
  88   gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);   in test_gpu_elementwise_small()
  141  gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);   in test_gpu_elementwise()
  142  gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);   in test_gpu_elementwise()
  143  gpuMemcpy(d_in3, in3.data(), in3_bytes, gpuMemcpyHostToDevice);   in test_gpu_elementwise()
  185  gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);   in test_gpu_props()
  223  gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);   in test_gpu_reduction()
  282  gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);   in test_gpu_contraction()
  [all …]
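Every hit in this file, and in the tensor_contract, tensor_device, tensor_argmax and tensor_scan files below, is the host-to-device staging step of the same test pattern: fill host tensors, copy them to device buffers with gpuMemcpy, evaluate the expression through Eigen's GpuDevice, then copy the result back. A minimal sketch of that pattern, not the actual test code: the sizes and variable names are illustrative, and the gpu* aliases from TensorGpuHipCudaDefines.h are assumed to be in scope, as they are inside these .cu tests (compiled with nvcc or hipcc).

    #define EIGEN_USE_GPU
    #include <unsupported/Eigen/CXX11/Tensor>

    static void elementwise_roundtrip_sketch() {
      // Host-side tensors (sizes are illustrative).
      Eigen::Tensor<float, 1> in1(64), in2(64), out(64);
      in1.setRandom();
      in2.setRandom();
      const std::size_t bytes = in1.size() * sizeof(float);

      // Device buffers, allocated through the gpuMalloc alias.
      float *d_in1, *d_in2, *d_out;
      gpuMalloc((void**)(&d_in1), bytes);
      gpuMalloc((void**)(&d_in2), bytes);
      gpuMalloc((void**)(&d_out), bytes);

      // The step matched by the hits above: stage host data on the device.
      gpuMemcpy(d_in1, in1.data(), bytes, gpuMemcpyHostToDevice);
      gpuMemcpy(d_in2, in2.data(), bytes, gpuMemcpyHostToDevice);

      // Evaluate an elementwise expression on the GPU.
      Eigen::GpuStreamDevice stream;
      Eigen::GpuDevice gpu_device(&stream);
      Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_in1(d_in1, 64), gpu_in2(d_in2, 64), gpu_out(d_out, 64);
      gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
      gpu_device.synchronize();

      // Copy the result back for host-side verification, then free the buffers.
      gpuMemcpy(out.data(), d_out, bytes, gpuMemcpyDeviceToHost);
      gpuFree(d_in1);
      gpuFree(d_in2);
      gpuFree(d_out);
    }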
cxx11_tensor_contract_gpu.cu
  53   gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);   in test_gpu_contraction()
  54   gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);   in test_gpu_contraction()
  70   gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);   in test_gpu_contraction()
  117  gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);   in test_scalar()
  118  gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);   in test_scalar()
  133  gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);   in test_scalar()
cxx11_tensor_device.cu
  72   …assert(gpuMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);   in GPUContext()
  76   …assert(gpuMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);   in GPUContext()
  80   …assert(gpuMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);   in GPUContext()
  289  gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);   in test_gpu()
  290  gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);   in test_gpu()
  298  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);   in test_gpu()
  308  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);   in test_gpu()
  318  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);   in test_gpu()
  328  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);   in test_gpu()
cxx11_tensor_argmax_gpu.cu
  43   gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice);   in test_gpu_simple_argmax()
  105  gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);   in test_gpu_argmax_dim()
  139  gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);   in test_gpu_argmax_dim()
  194  gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);   in test_gpu_argmin_dim()
  228  gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);   in test_gpu_argmin_dim()
cxx11_tensor_scan_gpu.cu
  43  gpuMemcpy(d_t_input, t_input.data(), t_input_bytes, gpuMemcpyHostToDevice);   in test_gpu_cumsum()
  56  gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);   in test_gpu_cumsum()
/external/eigen/test/
gpu_common.h
  53  gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice);   in run_on_gpu()
  54  gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice);   in run_on_gpu()
  87  …gpuMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost…   in run_on_gpu()
  88  gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost);   in run_on_gpu()
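The two pairs of hits in gpu_common.h are the two halves of a round trip: run_on_gpu stages both the input and the output buffer on the device before launching the kernel, and copies both back afterwards (hence the const_cast on line 87), because a functor under test may write through its input. A minimal sketch of that shape, again assuming the gpu* aliases are in scope; run_kernel_sketch and the <<<1, 1>>> launch are illustrative helpers, not the actual gpu_common.h code.

    #include <cstddef>

    // Illustrative device-side trampoline: the functor under test does its own indexing.
    template <typename Kernel, typename T, typename U>
    __global__ void run_kernel_sketch(Kernel ker, int n, T* in, U* out) {
      ker(n, in, out);
    }

    template <typename Kernel, typename Input, typename Output>
    void run_on_gpu_sketch(const Kernel& ker, int n, const Input& in, Output& out) {
      const std::size_t in_bytes  = in.size()  * sizeof(typename Input::Scalar);
      const std::size_t out_bytes = out.size() * sizeof(typename Output::Scalar);

      typename Input::Scalar*  d_in;
      typename Output::Scalar* d_out;
      gpuMalloc((void**)(&d_in),  in_bytes);
      gpuMalloc((void**)(&d_out), out_bytes);

      // Stage both buffers on the device (the hits on lines 53 and 54).
      gpuMemcpy(d_in,  in.data(),  in_bytes,  gpuMemcpyHostToDevice);
      gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice);

      run_kernel_sketch<<<1, 1>>>(ker, n, d_in, d_out);

      // Copy both buffers back (the hits on lines 87 and 88); the const_cast lets
      // an in-place write to the "input" become visible to the host-side checks.
      gpuMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost);
      gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost);

      gpuFree(d_in);
      gpuFree(d_out);
    }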
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorGpuHipCudaDefines.h
  53  #define gpuMemcpy hipMemcpy   (macro)
  83  #define gpuMemcpy cudaMemcpy   (macro)
TensorGpuHipCudaUndefines.h
  38  #undef gpuMemcpy
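Read together, these two headers are the portability scheme everything above relies on: TensorGpuHipCudaDefines.h maps the gpu* spellings onto the HIP runtime when Eigen is built for HIP and onto the CUDA runtime otherwise, and TensorGpuHipCudaUndefines.h removes the macros again at the end of the module so they do not leak into user code. A condensed sketch of that mapping, showing only the gpuMemcpy-related names; the real headers cover the full runtime API, and the exact preprocessor condition is the one in the header (rendered here as an EIGEN_USE_HIP guard):

    // TensorGpuHipCudaDefines.h (condensed)
    #if defined(EIGEN_USE_HIP)
      #define gpuMemcpy              hipMemcpy
      #define gpuMemcpyHostToDevice  hipMemcpyHostToDevice
      #define gpuMemcpyDeviceToHost  hipMemcpyDeviceToHost
    #else  // CUDA build
      #define gpuMemcpy              cudaMemcpy
      #define gpuMemcpyHostToDevice  cudaMemcpyHostToDevice
      #define gpuMemcpyDeviceToHost  cudaMemcpyDeviceToHost
    #endif

    // ...tensor module code and the .cu tests use the gpu* names here...

    // TensorGpuHipCudaUndefines.h (condensed)
    #undef gpuMemcpy
    #undef gpuMemcpyHostToDevice
    #undef gpuMemcpyDeviceToHost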
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
custom_call_test.cc
  41  #define gpuMemcpy cudaMemcpy   (macro)
  48  #define gpuMemcpy hipMemcpy   (macro)
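custom_call_test.cc does not include the Eigen header above; it defines the same alias locally so a single test body can target both CUDA and ROCm builds. A condensed sketch of such a local shim, assuming the usual GOOGLE_CUDA / TENSORFLOW_USE_ROCM build macros and that the corresponding runtime header is already included; only gpuMemcpy appears in the hits above, and the companion names here are illustrative of how a shim like this is typically rounded out:

    #if GOOGLE_CUDA
    // cuda_runtime_api.h assumed to be included at this point
    #define gpuMemcpy              cudaMemcpy
    #define gpuMemcpyDeviceToHost  cudaMemcpyDeviceToHost
    #define gpuSuccess             cudaSuccess
    #elif TENSORFLOW_USE_ROCM
    // hip/hip_runtime.h assumed to be included at this point
    #define gpuMemcpy              hipMemcpy
    #define gpuMemcpyDeviceToHost  hipMemcpyDeviceToHost
    #define gpuSuccess             hipSuccess
    #endif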