
Searched full:is_cuda (Results 1 – 25 of 238) sorted by relevance

/external/pytorch/test/distributions/
test_constraints.py
85 def build_constraint(constraint_fn, args, is_cuda=False): argument
88 t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
94 "is_cuda",
102 def test_constraint(constraint_fn, result, value, is_cuda): argument
103 t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
111 "is_cuda",
119 def test_biject_to(constraint_fn, args, is_cuda): argument
120 constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
131 if is_cuda:
152 "is_cuda",
[all …]
/external/pytorch/torch/ao/quantization/pt2e/
qat_utils.py
62 is_cuda: bool,
79 if is_cuda:
637 for is_cuda in is_cuda_options:
639 m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=is_cuda
642 m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=is_cuda
645 m, F.conv_transpose1d, _conv1d_bn_example_inputs, is_cuda=is_cuda
648 m, F.conv_transpose2d, _conv2d_bn_example_inputs, is_cuda=is_cuda
657 is_cuda: bool,
671 conv_bn_pattern, example_inputs, is_cuda
684 is_cuda,
[all …]
export_utils.py
142 is_cuda = device is not None and device.type == "cuda"
146 is_cuda,
151 is_cuda,
/external/pytorch/test/cpp/api/
transformer.cpp
55 bool is_cuda, in transformer_encoder_layer_test_helper() argument
58 torch::Device device = is_cuda ? torch::kCUDA : torch::kCPU; in transformer_encoder_layer_test_helper()
234 /*is_cuda=*/false, /*use_callable_activation=*/false); in TEST_F()
236 /*is_cuda=*/false, /*use_callable_activation=*/true); in TEST_F()
241 /*is_cuda=*/true, /*use_callable_activation=*/false); in TEST_F()
243 /*is_cuda=*/true, /*use_callable_activation=*/true); in TEST_F()
247 bool is_cuda, in transformer_decoder_layer_test_helper() argument
249 torch::Device device = is_cuda ? torch::kCUDA : torch::kCPU; in transformer_decoder_layer_test_helper()
440 /*is_cuda=*/false, /*use_callable_activation=*/false); in TEST_F()
442 /*is_cuda=*/false, /*use_callable_activation=*/true); in TEST_F()
[all …]
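
The C++ test helpers above turn their is_cuda flag into a torch::Device before building tensors. A minimal standalone sketch of that idiom (the helper name and tensor shape are illustrative, not taken from transformer.cpp):

    #include <torch/torch.h>

    // Hypothetical helper: map the is_cuda flag to a device, as the
    // transformer test helpers do, and allocate a test input there.
    torch::Tensor make_test_input(bool is_cuda) {
      torch::Device device = is_cuda ? torch::kCUDA : torch::kCPU;
      return torch::randn({2, 3}, torch::TensorOptions().device(device));
    }
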
/external/pytorch/aten/src/ATen/test/
cuda_reportMemoryUsage_test.cpp
22 EXPECT_TRUE(r.device.is_cuda()); in TEST()
38 EXPECT_TRUE(r.device.is_cuda()); in TEST()
54 EXPECT_TRUE(r.device.is_cuda()); in TEST()
62 EXPECT_TRUE(r.device.is_cuda()); in TEST()
cuda_tensor_interop_test.cpp
34 ASSERT_TRUE(at_tensor.is_cuda()); in TEST()
53 ASSERT_TRUE(at_tensor.is_cuda()); in TEST()
72 ASSERT_TRUE(at_tensor.is_cuda()); in TEST()
117 ASSERT_TRUE(at_result.is_cuda()); in TEST()
/external/pytorch/aten/src/ATen/native/cuda/
SummaryOps.cu
37 at::acc_type<input_t, /*is_cuda=*/true> minvalue, in getBin()
38 at::acc_type<input_t, /*is_cuda=*/true> maxvalue, in getBin()
70 at::acc_type<input_t, /*is_cuda=*/true> minvalue, in C10_LAUNCH_BOUNDS_1()
71 at::acc_type<input_t, /*is_cuda=*/true> maxvalue, in C10_LAUNCH_BOUNDS_1()
178 at::acc_type<input_t, /*is_cuda=*/true> minvalue, in CUDA_tensor_histogram()
179 at::acc_type<input_t, /*is_cuda=*/true> maxvalue, in CUDA_tensor_histogram()
280 using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>; in _bincount_cuda_template()
312 at::acc_type<input_t, /*is_cuda=*/true> min, in _histc_cuda_template()
313 at::acc_type<input_t, /*is_cuda=*/true> max) { in _histc_cuda_template()
396 using bounds_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _histc_cuda()
AmpKernels.cu
96 TORCH_CHECK(inv_scale.is_cuda(), "inv_scale must be a CUDA tensor."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
97 TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
117 TORCH_CHECK(scaled_grads[0].is_cuda(), "scaled_grads must be CUDA tensors."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
133 TORCH_CHECK(t.is_cuda(), "one of scaled_grads was not a CUDA tensor."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
230 TORCH_CHECK(growth_tracker.is_cuda(), "growth_tracker must be a CUDA tensor."); in _amp_update_scale_cuda_()
231 TORCH_CHECK(current_scale.is_cuda(), "current_scale must be a CUDA tensor."); in _amp_update_scale_cuda_()
232 TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor."); in _amp_update_scale_cuda_()
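
AmpKernels.cu guards every input with TORCH_CHECK(t.is_cuda(), ...) before launching work. A minimal sketch of that guard pattern (the function below is illustrative, not the actual AMP kernel):

    #include <ATen/ATen.h>

    // Illustrative device guard mirroring the checks above: reject CPU
    // tensors up front with a clear error instead of failing inside a
    // CUDA kernel launch.
    void check_all_cuda(const at::Tensor& grad, const at::Tensor& found_inf) {
      TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor.");
      TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor.");
      TORCH_CHECK(grad.device() == found_inf.device(),
                  "grad and found_inf must be on the same device.");
    }
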
Copy.cu
263 TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); in copy_requires_temporaries()
271 } else if (dst_device.is_cuda() && src_device.is_cuda()) { in copy_requires_temporaries()
339 if (dst_device.is_cuda() && src_device.is_cuda()) { in copy_kernel_cuda()
347 if (dst_device.is_cuda() && src_device.is_cpu()) { in copy_kernel_cuda()
350 } else if (dst_device.is_cpu() && src_device.is_cuda()) { in copy_kernel_cuda()
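
Copy.cu branches on is_cuda()/is_cpu() for both endpoints to decide how a copy must be performed. A small sketch of that classification (the enum and function are illustrative, not part of Copy.cu):

    #include <c10/core/Device.h>

    // Illustrative: classify a copy by the placement of its endpoints,
    // mirroring the device checks in copy_kernel_cuda.
    enum class CopyKind { DeviceToDevice, HostToDevice, DeviceToHost, HostToHost };

    CopyKind classify_copy(const c10::Device& dst, const c10::Device& src) {
      if (dst.is_cuda() && src.is_cuda()) return CopyKind::DeviceToDevice;
      if (dst.is_cuda() && src.is_cpu())  return CopyKind::HostToDevice;
      if (dst.is_cpu()  && src.is_cuda()) return CopyKind::DeviceToHost;
      return CopyKind::HostToHost;
    }
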
/external/pytorch/aten/src/ATen/native/sparse/cuda/
SparseCUDATensorMath.cu
91 …TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to… in s_addmm_out_sparse_dense_cuda()
92 …TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to… in s_addmm_out_sparse_dense_cuda()
93 …TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'ma… in s_addmm_out_sparse_dense_cuda()
94 …TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2… in s_addmm_out_sparse_dense_cuda()
187 TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU"); in hspmm_out_sparse_cuda()
188 TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU"); in hspmm_out_sparse_cuda()
189 TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU"); in hspmm_out_sparse_cuda()
272 TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"); in add_out_dense_sparse_cuda()
273 TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor"); in add_out_dense_sparse_cuda()
274 TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor"); in add_out_dense_sparse_cuda()
[all …]
SparseCsrTensorMath.cu
147 TORCH_INTERNAL_ASSERT(dense.is_cuda()); in add_out_dense_sparse_compressed_cuda()
154 output.is_cuda(), in add_out_dense_sparse_compressed_cuda()
159 src.is_cuda(), in add_out_dense_sparse_compressed_cuda()
327 self.is_cuda(), in add_out_sparse_compressed_cuda()
331 other.is_cuda(), in add_out_sparse_compressed_cuda()
335 out.is_cuda(), in add_out_sparse_compressed_cuda()
487 // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type in reduce_sparse_csr_dim0_cuda_template()
578 // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type in reduce_sparse_csr_dim1_cuda_template()
676 TORCH_INTERNAL_ASSERT(sparse.is_cuda()); in reduce_sparse_csr_cuda_template()
781 // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type in _sparse_csr_sum_cuda()
/external/pytorch/test/
test_foreach.py
77 def __call__(self, inputs, is_cuda, expect_fastpath, **kwargs): argument
81 is_cuda
142 def is_cuda(self): member in TestForeach
175 is_cuda=self.is_cuda,
184 is_cuda=self.is_cuda,
229 self.is_cuda,
261 actual = op(inputs, self.is_cuda, is_fastpath)
284 actual = op(inputs, self.is_cuda, is_fastpath, **op_kwargs)
344 [rhs_arg, tensors], is_cuda=False, expect_fastpath=False
401 if self.is_cuda:
[all …]
/external/pytorch/test/cpp_extensions/
cuda_extension.cpp
9 TORCH_CHECK(x.device().is_cuda(), "x must be a CUDA tensor"); in sigmoid_add()
10 TORCH_CHECK(y.device().is_cuda(), "y must be a CUDA tensor"); in sigmoid_add()
cuda_dlink_extension.cpp
7 TORCH_CHECK(a.device().is_cuda(), "a is a cuda tensor"); in add()
8 TORCH_CHECK(b.device().is_cuda(), "b is a cuda tensor"); in add()
/external/pytorch/torch/csrc/distributed/c10d/quantization/
quantization_utils.h
26 !x.is_cuda(), \
32 x.is_cuda(), \
/external/pytorch/aten/src/ATen/
AccumulateType.h
14 // using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
76 template <typename T, bool is_cuda>
77 using acc_type = typename AccumulateType<T, is_cuda>::type;
171 TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);
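
acc_type<T, is_cuda> selects an accumulation type wide enough for reductions (e.g. float for at::Half when is_cuda is true), which is how the CUDA histogram and sparse kernels above use it. A small host-side sketch under that assumption (not taken from AccumulateType.h):

    #include <ATen/AccumulateType.h>
    #include <cstdint>

    // Illustrative: accumulate scalar_t values in the wider type that
    // acc_type selects for CUDA, so half-precision sums keep precision.
    template <typename scalar_t>
    at::acc_type<scalar_t, /*is_cuda=*/true> sum_in_acc_type(
        const scalar_t* data, int64_t n) {
      using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
      accscalar_t total = 0;
      for (int64_t i = 0; i < n; ++i) {
        total += static_cast<accscalar_t>(data[i]);
      }
      return total;
    }
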
AccumulateType.cpp
27 c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda) { in toAccumulateType() argument
28 …return is_cuda ? toAccumulateType(type, c10::DeviceType::CUDA) : toAccumulateType(type, c10::Devic… in toAccumulateType()
/external/pytorch/test/distributed/checkpoint/
test_state_dict_utils.py
50 self.assertTrue(gathered_state_dict["dtensor"].is_cuda)
70 self.assertFalse(gathered_state_dict["dtensor"].is_cuda)
86 self.assertFalse(v.is_cuda)
156 self.assertFalse(v.is_cuda)
/external/pytorch/torch/csrc/tensor/
python_tensor.cpp
37 bool is_cuda; member
73 !tensor_type.is_cuda || torch::utils::cuda_enabled(), in Tensor_new()
77 if (tensor_type.is_cuda) { in Tensor_new()
126 if (self->is_cuda) { in Tensor_is_cuda()
168 {"is_cuda", (getter)Tensor_is_cuda, nullptr, nullptr, nullptr},
248 type_obj.is_cuda = in set_type()
440 !type->is_cuda || torch::utils::cuda_enabled(), in py_set_default_tensor_type()
/external/pytorch/aten/src/ATen/native/quantized/cuda/
EmbeddingBag.cu
199 TORCH_CHECK(weight.is_cuda()); in embedding_bag_byte_impl()
200 TORCH_CHECK(indices.is_cuda()); in embedding_bag_byte_impl()
201 TORCH_CHECK(offsets.is_cuda()); in embedding_bag_byte_impl()
262 TORCH_CHECK(output.is_cuda()); in embedding_bag_byte_impl()
381 TORCH_CHECK(weight.is_cuda()); in embedding_bag_4bit_impl()
382 TORCH_CHECK(indices.is_cuda()); in embedding_bag_4bit_impl()
383 TORCH_CHECK(offsets.is_cuda()); in embedding_bag_4bit_impl()
444 TORCH_CHECK(output.is_cuda()); in embedding_bag_4bit_impl()
/external/pytorch/
Dockerfile
80 RUN IS_CUDA=$(python -c 'import torch ; print(torch.cuda._is_compiled())'); \
81 echo "Is torch compiled with cuda: ${IS_CUDA}"; \
82 if test "${IS_CUDA}" != "True" -a ! -z "${CUDA_VERSION}"; then \
/external/pytorch/torch/csrc/api/include/torch/nn/utils/
convert_parameters.h
19 old_param_device = param.is_cuda() ? param.get_device() : -1; in _check_param_device()
22 if (param.is_cuda()) { // Check if in same GPU in _check_param_device()
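
convert_parameters.h maps each parameter to -1 for CPU or its CUDA index and requires that all parameters agree. A simplified sketch of that check (names and signature are illustrative, not the actual _check_param_device):

    #include <torch/torch.h>
    #include <optional>
    #include <vector>

    // Illustrative: every parameter must live on the same device;
    // -1 stands for CPU, otherwise the CUDA device index.
    void check_same_device(const std::vector<torch::Tensor>& params) {
      std::optional<int64_t> seen;
      for (const auto& param : params) {
        const int64_t dev = param.is_cuda() ? param.get_device() : -1;
        if (!seen.has_value()) {
          seen = dev;
        } else {
          TORCH_CHECK(dev == *seen, "Found parameters on different devices.");
        }
      }
    }
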
/external/pytorch/torch/csrc/cuda/shared/
cudnn.cpp
91 cudnn.attr("is_cuda") = true; in initCudnnBindings()
93 cudnn.attr("is_cuda") = false; in initCudnnBindings()
/external/pytorch/aten/src/ATen/cuda/
EmptyTensor.cpp
15 TORCH_INTERNAL_ASSERT(device.is_cuda()); in empty_cuda()
55 TORCH_INTERNAL_ASSERT(device.is_cuda()); in empty_strided_cuda()
/external/pytorch/torch/csrc/jit/passes/
frozen_conv_add_relu_fusion_cuda.cpp
86 if (!weight_t.device().is_cuda() || !weight_t.is_contiguous()) { in fuseFrozenConvAddReluImpl()
96 bias_t.size(0) != weight_t.size(0) || !bias_t.device().is_cuda()) { in fuseFrozenConvAddReluImpl()
109 !z_t.device().is_cuda()) { in fuseFrozenConvAddReluImpl()
