
Searched full:masked_fill_ (Results 1 – 25 of 80) sorted by relevance

/external/pytorch/aten/src/ATen/native/quantized/
TensorAdvancedIndexing.cpp
40 "Use of masked_fill_ on expanded tensors is deprecated. " in masked_fill_impl_quantized_cpu()
61 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); in masked_fill__quantized_cpu()
70 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); in masked_fill__quantized_cpu()
71 …TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got te… in masked_fill__quantized_cpu()
86 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); in masked_fill_impl_quantized_cuda()
90 "Use of masked_fill_ on expanded tensors is deprecated. " in masked_fill_impl_quantized_cuda()
96 c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_"); in masked_fill_impl_quantized_cuda()
113 TORCH_CHECK(!self.device().is_cpu(), "masked_fill_: Expected inputs to be on same device") in masked_fill__quantized_cuda()
118 …TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got te… in masked_fill__quantized_cuda()
120 TORCH_CHECK(!self.device().is_cpu(), "masked_fill_: Expected inputs to be on same device") in masked_fill__quantized_cuda()
[all …]
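Both quantized overloads enforce the same contract as the regular ones: expand_inplace broadcasts the mask up to self's shape, a tensor fill value must be 0-dimensional, and writing through an already-expanded self only earns the deprecation warning quoted above. A small, non-quantized Python illustration of the broadcasting side (shapes and names here are made up for the example):

    import torch

    x = torch.zeros(2, 3)
    row_mask = torch.tensor([True, False, True])   # shape (3,) is broadcast to (2, 3)

    x.masked_fill_(row_mask, 1.0)
    # tensor([[1., 0., 1.],
    #         [1., 0., 1.]])

    # Filling through an expanded destination instead would warn:
    # "Use of masked_fill_ on expanded tensors is deprecated."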
/external/pytorch/aten/src/ATen/native/cuda/
Indexing.cu
1193 counts.masked_fill_(counts == 0, 1); in TORCH_IMPL_FUNC()
1515 kBool, kHalf, kBFloat16, kComplexHalf, iter.common_dtype(), "masked_fill_", [&]() { in masked_fill_kernel()
1542 iter.common_dtype(), "masked_fill_", [&]() { in masked_fill_kernel_quantized()
1559 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); in masked_fill__cuda()
1562 "Use of masked_fill_ on expanded tensors is deprecated. " in masked_fill__cuda()
1568 c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_"); in masked_fill__cuda()
1585 …TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got te… in masked_fill__cuda()
1591 TORCH_CHECK(!self.device().is_cpu(), "masked_fill_: Expected inputs to be on same device") in masked_fill__cuda()
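The counts.masked_fill_(counts == 0, 1) line belongs to the mean-reduction paths (index_reduce / scatter-style averaging): buckets that received no contribution keep a count of one so the later division leaves them untouched. A rough stand-alone version of that guard, with illustrative variable names rather than the kernel's:

    import torch

    values = torch.tensor([1.0, 2.0, 3.0, 4.0])
    index  = torch.tensor([0, 0, 2, 2])

    sums   = torch.zeros(3).index_add_(0, index, values)
    counts = torch.zeros(3).index_add_(0, index, torch.ones_like(values))

    counts.masked_fill_(counts == 0, 1)   # avoid 0/0 for buckets that saw no values
    means = sums / counts                 # tensor([1.5, 0.0, 3.5])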
/external/pytorch/aten/src/ATen/native/
TensorAdvancedIndexing.cpp
800 return self.masked_fill_(std::get<1>(masked_fill_dispatch), value.item()); in _index_put_impl_()
1149 counts.masked_fill_(counts == 0, 1); in index_reduce_func_impl()
1204 counts.masked_fill_(counts == 0, 1); in index_reduce_func_impl()
1948 count.masked_fill_(count == 0, 1); in TORCH_IMPL_FUNC()
1986 …TORCH_CHECK(mask.dtype() == ScalarType::Bool, "masked_fill_ only supports boolean masks, but got m… in masked_fill_impl_cpu()
1991 "Use of masked_fill_ on expanded tensors is deprecated. " in masked_fill_impl_cpu()
2010 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); in masked_fill__cpu()
2018 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); in masked_fill__cpu()
2019 …TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got te… in masked_fill__cpu()
2034 result.masked_fill_(mask, source); in masked_fill()
[all …]
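The checks in masked_fill_impl_cpu / masked_fill__cpu spell out the user-facing contract: the mask must be a boolean tensor, and a fill value given as a tensor must be 0-dimensional. A quick sketch of both sides (exact error wording may differ slightly between releases):

    import torch

    x = torch.zeros(2, 3)
    mask = x > -1                                  # boolean mask, same shape as x

    x.masked_fill_(mask, 1.0)                      # scalar value: fine
    x.masked_fill_(mask, torch.tensor(2.0))        # 0-dim tensor value: fine

    # These trip the TORCH_CHECKs quoted above:
    # x.masked_fill_(mask.to(torch.uint8), 1.0)    # "only supports boolean masks"
    # x.masked_fill_(mask, torch.tensor([2.0]))    # "only supports a 0-dimensional value tensor"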
LossNLL.cpp
585 smooth_loss.masked_fill_(ignore_mask, 0.0); in cross_entropy_loss_label_smoothing()
597 tgt_weights.masked_fill_(ignore_mask.flatten(), 0).sum(); in cross_entropy_loss_label_smoothing()
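In the label-smoothing branch of cross-entropy, masked_fill_ is what removes targets equal to ignore_index from both the loss terms and the weight normalizer. A much-simplified sketch of the idea, not the actual loss implementation:

    import torch

    targets = torch.tensor([2, 5, -100, 1])          # -100 is the default ignore_index
    ignore_mask = targets == -100

    per_sample_loss = torch.tensor([0.7, 1.2, 9.9, 0.3])
    per_sample_loss.masked_fill_(ignore_mask, 0.0)    # ignored positions contribute nothing

    mean_loss = per_sample_loss.sum() / (~ignore_mask).sum()   # average over kept samples only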
/external/pytorch/test/
test_maskedtensor.py
38 a = mt_result_data.detach().masked_fill_(~mask, 0)
39 b = t_result.detach().masked_fill_(~mask, 0)
63 a = mt_data1.detach().masked_fill_(~mask, 0)
64 b = mt_data2.detach().masked_fill_(~mask, 0)
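The test neutralizes the unspecified elements on both sides with masked_fill_ before comparing, so only positions where the mask is True participate in the check. The same trick in isolation:

    import torch

    mask = torch.tensor([True, False, True])
    a = torch.tensor([1.0, 7.0, 3.0])              # differs from b only where mask is False
    b = torch.tensor([1.0, 9.0, 3.0])

    torch.testing.assert_close(a.clone().masked_fill_(~mask, 0),
                               b.clone().masked_fill_(~mask, 0))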
/external/pytorch/torch/csrc/jit/passes/
remove_inplace_ops.cpp
11 {aten::masked_fill_, aten::masked_fill},
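The remove_inplace_ops pass rewrites aten::masked_fill_ into its functional counterpart aten::masked_fill; for ordinary tensors the two differ only in whether a new output is allocated:

    import torch

    x = torch.zeros(3)
    mask = torch.tensor([True, False, True])

    y = x.masked_fill(mask, 1.0)   # out-of-place: x is untouched, y is a new tensor
    x.masked_fill_(mask, 1.0)      # in-place: x itself is modified

    assert torch.equal(x, y)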
/external/pytorch/benchmarks/dynamo/microbenchmarks/operator_inp_logs/torchbench_train/
tts_angular_training.txt
29 Operator: aten.masked_fill_.Scalar
hf_Bart_training.txt
53 Operator: aten.masked_fill_.Scalar
/external/pytorch/benchmarks/dynamo/microbenchmarks/
operatorbench.py
120 "masked_fill_.Scalar",
/external/pytorch/benchmarks/dynamo/microbenchmarks/operator_inp_logs/hf_train/
PegasusForCausalLM_training.txt
47 Operator: aten.masked_fill_.Scalar
BartForCausalLM_training.txt
48 Operator: aten.masked_fill_.Scalar
PLBartForCausalLM_training.txt
48 Operator: aten.masked_fill_.Scalar
MBartForCausalLM_training.txt
48 Operator: aten.masked_fill_.Scalar
BlenderbotSmallForCausalLM_training.txt
48 Operator: aten.masked_fill_.Scalar
PegasusForConditionalGeneration_training.txt
54 Operator: aten.masked_fill_.Scalar
TrOCRForCausalLM_training.txt
48 Operator: aten.masked_fill_.Scalar
XGLMForCausalLM_training.txt
58 Operator: aten.masked_fill_.Scalar
DebertaV2ForMaskedLM_training.txt
54 Operator: aten.masked_fill_.Scalar
Speech2Text2ForCausalLM_training.txt
50 Operator: aten.masked_fill_.Scalar
BartForConditionalGeneration_training.txt
61 Operator: aten.masked_fill_.Scalar
M2M100ForConditionalGeneration_training.txt
56 Operator: aten.masked_fill_.Scalar
BlenderbotSmallForConditionalGeneration_training.txt
55 Operator: aten.masked_fill_.Scalar
MBartForConditionalGeneration_training.txt
62 Operator: aten.masked_fill_.Scalar
/external/pytorch/aten/src/ATen/functorch/
BatchRulesBinaryOps.cpp
478 …VMAP_SUPPORT2(masked_fill_, Scalar, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorScalarInp… in TORCH_LIBRARY_IMPL()
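This VMAP_SUPPORT2 registration is what lets torch.func.vmap batch through the in-place Scalar overload, provided the tensor being written to is itself part of the vmapped batch. A minimal sketch, assuming a recent torch with torch.func available:

    import torch
    from torch.func import vmap

    def zero_out(x, mask):
        # in-place fill on a per-example tensor; handled by the batch rule above
        return x.clone().masked_fill_(mask, 0.0)

    xs = torch.randn(4, 3)
    masks = torch.rand(4, 3) > 0.5
    out = vmap(zero_out)(xs, masks)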
/external/pytorch/aten/src/ATen/native/mps/operations/
Indexing.mm
663 auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
665 c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
816 "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "