
Searched full:fill_ (Results 1 – 25 of 290) sorted by relevance

/external/pytorch/test/onnx/
test_models.py
71 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
75 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
101 x = Variable(torch.randn(1, 3, 224, 224).fill_(1.0))
108 x = Variable(torch.randn(BATCH_SIZE, 1, 224, 224).fill_(1.0))
112 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
116 x = Variable(torch.randn(BATCH_SIZE, 1, 28, 28).fill_(1.0))
122 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
128 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
134 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
140 x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
[all …]
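
The test inputs above are built by allocating a tensor and then overwriting it with a constant via fill_. A minimal sketch of the same pattern outside the test harness (BATCH_SIZE here is illustrative, not necessarily the value defined in test_models.py; torch.autograd.Variable is legacy, so a plain tensor is used):

    import torch

    BATCH_SIZE = 2  # illustrative value

    # randn allocates, fill_ overwrites in place, so the result is all ones
    x = torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)
    assert bool((x == 1.0).all())

    # the same constant input can be built directly
    x2 = torch.ones(BATCH_SIZE, 3, 224, 224)
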
/external/pytorch/aten/src/ATen/native/
Fill.cpp
47 Tensor& fill_(Tensor& self, const Scalar& value) { in fill_() function
55 Tensor& fill_(Tensor& self, const Tensor& value) { in fill_() function
56 …TORCH_CHECK(value.dim() == 0, "fill_ only supports 0-dimension value tensor but got tensor with ",… in fill_()
71 …TORCH_CHECK(value.dim() == 0, "fill_ only supports 0-dimension value tensor but got tensor with ",… in fill_quantized_()
80 …TORCH_CHECK(value.dim() == 0, "fill_ only supports 0-dimension value tensor but got tensor with ",… in fill_meta_()
85 return at::empty_like(self).fill_(value); in fill()
89 return at::empty_like(self).fill_(value); in fill()
125 main_diag.fill_(fill_value); in fill_diagonal_()
137 wrap_diag.fill_(fill_value); in fill_diagonal_()
146 return self.fill_(0); in zero_cpu_()
[all …]
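
Fill.cpp above defines the eager-mode behavior visible from Python: fill_ accepts a Scalar or a 0-dim tensor, rejects value tensors with dimensions, and backs helpers such as fill_diagonal_ and zero_. A small sketch of that behavior:

    import torch

    t = torch.empty(3, 3)

    t.fill_(2.5)                  # Scalar overload
    t.fill_(torch.tensor(7.0))    # 0-dim tensor overload

    try:
        t.fill_(torch.tensor([1.0, 2.0]))   # 1-D value tensor is rejected
    except RuntimeError as e:
        print(e)  # "fill_ only supports 0-dimension value tensor but got tensor with ..."

    t.fill_diagonal_(0.0)   # fills only the main diagonal (plus wrap diagonals if requested)
    t.zero_()               # zero_ is implemented in terms of fill_(0)
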
/external/pytorch/aten/src/ATen/native/cpu/
ReduceAllOpsKernel.cpp
42 output.fill_(result); in reduce_all_impl_vec()
62 output.fill_(result); in reduce_all_impl()
74 result.fill_(result_data); in min_all_kernel_impl()
99 result.fill_(result_data); in max_all_kernel_impl()
137 output1.fill_(result.first); in reduce_all_impl_two_outputs()
138 output2.fill_(result.second); in reduce_all_impl_two_outputs()
166 output1.fill_(result.first); in reduce_all_impl_vec_two_outputs()
167 output2.fill_(result.second); in reduce_all_impl_vec_two_outputs()
184 min_result.fill_(min_result_data); in aminmax_allreduce_kernel()
185 max_result.fill_(max_result_data); in aminmax_allreduce_kernel()
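
The CPU all-reduce kernels above compute one scalar (or a pair, for aminmax) over the whole tensor and write it into a 0-dim output via fill_. From Python this surfaces as 0-dim results; a quick check with an arbitrary input:

    import torch

    x = torch.randn(5, 7)

    print(x.min().dim(), x.max().dim())   # 0 0 -- single-value reductions
    amin, amax = torch.aminmax(x)         # the two-output reduction listed above
    print(amin.shape, amax.shape)         # torch.Size([]) torch.Size([])
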
/external/pytorch/test/distributed/fsdp/
test_fsdp_apply.py
40 m.weight.fill_(1.0)
41 m.bias.fill_(1.0)
56 fsdp, lambda param: torch.empty_like(param).fill_(1.0), self.assertNotEqual
63 fsdp, lambda param: torch.empty_like(param).fill_(1.0), self.assertEqual
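
test_fsdp_apply.py uses Module.apply with an init function that fill_s parameters in place. A minimal sketch of the same pattern without the FSDP wrapper or process-group setup (module sizes are illustrative):

    import torch
    import torch.nn as nn

    def init_weights(m):
        # in-place fill_ on leaf parameters must run under no_grad
        if isinstance(m, nn.Linear):
            with torch.no_grad():
                m.weight.fill_(1.0)
                m.bias.fill_(1.0)

    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    model.apply(init_weights)
    print(model[0].weight)   # all ones
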
/external/pytorch/benchmarks/operator_benchmark/pt/
fill_test.py
7 """Microbenchmark for Fill_ operator."""
44 self.set_module_name("fill_")
47 return input_one.fill_(10)
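
fill_test.py benchmarks the operator through the operator_benchmark harness. A rough stand-in using torch.utils.benchmark (tensor size and fill value are illustrative, not taken from the benchmark config):

    import torch
    from torch.utils.benchmark import Timer

    input_one = torch.empty(1024, 1024)

    t = Timer(stmt="input_one.fill_(10)", globals={"input_one": input_one})
    print(t.blocked_autorange())
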
/external/sdv/vsomeip/third_party/boost/format/include/boost/format/
internals.hpp
53 Ch fill_; member
108 // If a locale is available, set it first. "os.fill(fill_);" may chrash otherwise. in apply_on()
122 if(fill_ != 0) in apply_on()
123 os.fill(fill_); in apply_on()
135 fill_ = os.fill(); in set_by_stream()
155 fill_=fill; // default is widen(' '), but we cant compute it without the locale in reset()
187 fmtstate_.fill_='0'; in compute_states()
/external/pytorch/test/inductor/
test_extension_backend.py
130 x = torch.empty(2, 16).to(device=device).fill_(1)
132 y = torch.empty(2, 16).to(device=device).fill_(2)
133 z = torch.empty(2, 16).to(device=device).fill_(3)
134 ref = torch.empty(2, 16).fill_(5)
/external/pytorch/aten/src/ATen/
TensorOperators.h
19 ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y)) \
22 ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y)) \
25 ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y)) \
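
TensorOperators.h implements the reversed scalar operators (scalar op tensor) by materializing the scalar with empty_like(...).fill_(x) and then applying the in-place op. The Python-visible equivalence, sketched by hand:

    import torch

    y = torch.arange(1., 5.)   # tensor([1., 2., 3., 4.])

    manual = torch.empty_like(y, memory_format=torch.preserve_format).fill_(2.0).sub_(y)
    print(torch.equal(manual, 2.0 - y))   # True
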
/external/pytorch/test/dynamo/
test_backends.py
57 a = torch.empty(2).fill_(1)
58 b = torch.empty(2).fill_(2)
59 c = torch.empty(2).fill_(3)
89 a = torch.empty(2).fill_(1)
90 b = torch.empty(2).fill_(2)
91 c = torch.empty(2).fill_(3)
test_verify_correctness.py
73 a = torch.empty(2).fill_(1)
74 b = torch.empty(2).fill_(2)
75 c = torch.empty(2).fill_(3)
test_cudagraphs.py
168 x.fill_(2)
186 x.fill_(2)
187 y.fill_(3)
/external/tensorflow/tensorflow/compiler/mlir/tfrt/tests/jit/
tf_jitrt_fuse_fill_into_tiled_reduction.mlir
22 outs (%fill_ = %fill: tensor<?xf32>)
33 %13 = tensor.extract_slice %fill_[%i] [%12] [1]
45 %15 = tensor.insert_slice %14 into %fill_[%i] [%12] [1]
115 outs (%fill_ = %fill: tensor<8xf32>)
119 %4 = tensor.extract_slice %fill_[%i] [4] [1]
130 %6 = tensor.insert_slice %5 into %fill_[%i] [4] [1]
159 outs (%fill_ = %fill: tensor<?xf32>)
166 %10 = tensor.extract_slice %fill_[%j] [%9] [1]
177 %12 = tensor.insert_slice %11 into %fill_[%j] [%9] [1]
/external/pytorch/test/profiler/
test_profiler_tree.py
278 aten::fill_
286 aten::fill_
345 aten::fill_
356 aten::fill_
385 aten::fill_
396 aten::fill_
466 aten::fill_
501 aten::fill_
586 aten::fill_
633 aten::fill_
[all …]
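
The profiler-tree expectations above list aten::fill_ among the recorded ops. A short way to see those events yourself (sizes are arbitrary):

    import torch
    from torch.profiler import profile, ProfilerActivity

    with profile(activities=[ProfilerActivity.CPU]) as prof:
        x = torch.empty(1024, 1024)
        x.fill_(1.0)   # recorded as an aten::fill_ event

    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
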
/external/pytorch/aten/src/ATen/native/mps/operations/
BitwiseOps.mm
253 output.fill_(c10::Scalar(self.item<int64_t>() & other.item<int64_t>()));
255 output.fill_(c10::Scalar(self.item<int64_t>() | other.item<int64_t>()));
257 output.fill_(c10::Scalar(self.item<int64_t>() ^ other.item<int64_t>()));
259 output.fill_(c10::Scalar(self.item<int64_t>() << other.item<int64_t>()));
261 output.fill_(c10::Scalar(self.item<int64_t>() >> other.item<int64_t>()));
299 output.fill_(c10::Scalar(static_cast<uint8_t>(~self.item<uint8_t>())));
301 output.fill_(c10::Scalar(~self.item<int64_t>()));
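
BitwiseOps.mm takes a fast path when both operands are 0-dim: it computes the scalar result on the host and fill_s the output with it. The same results written as plain eager ops (values chosen for illustration; run on CPU here rather than MPS):

    import torch

    a = torch.tensor(0b1100)   # 12
    b = torch.tensor(0b1010)   # 10

    print(a & b)    # tensor(8)
    print(a | b)    # tensor(14)
    print(a ^ b)    # tensor(6)
    print(a << b)   # tensor(12288)
    print(~a)       # tensor(-13)
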
/external/pytorch/test/distributed/
test_symmetric_memory.py
95 buf.fill_(42)
104 buf.fill_(43)
386 ).fill_(self.rank)
422 chunks[r].fill_(r)
455 ).fill_(1)
490 ).fill_(0)
499 x.fill_(1)
/external/pytorch/test/jit/
test_remove_mutation.py
143 y.fill_(3)
149 FileCheck().check_not("aten::zero_").check_not("aten::fill_").run(graph)
157 x.fill_(y)
163 FileCheck().check_not("aten::fill_").run(graph)
308 inp.fill_(10)
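
test_remove_mutation.py checks that the TorchScript remove-mutation pass can eliminate aten::zero_ / aten::fill_ when the mutated tensor is created locally. A sketch of the kind of function that pass targets (the FileCheck assertions and direct pass invocation are specific to the test harness):

    import torch

    @torch.jit.script
    def fill_then_use(x):
        y = torch.zeros(x.size())
        y.fill_(3.0)    # in-place mutation on a locally created tensor
        return y + x

    # The unoptimized graph still contains aten::fill_; the pass rewrites the
    # pattern into a functional form during optimization.
    print(fill_then_use.graph)
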
/external/pytorch/test/error_messages/
storage.py
73 "fill_ with invalid type", lambda: torch.IntStorage(10).fill_("asdf"), "str"
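
storage.py asserts the error produced when fill_ is called on a typed storage with a non-numeric value. A sketch (torch.IntStorage is a legacy API and may emit deprecation warnings; the exact exception type is an assumption, so both common types are caught):

    import torch

    storage = torch.IntStorage(10)
    storage.fill_(3)            # numeric fill works

    try:
        storage.fill_("asdf")   # the test above expects this to fail, mentioning "str"
    except (TypeError, RuntimeError) as e:
        print(type(e).__name__, e)
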
/external/pytorch/aten/src/ATen/native/nested/cuda/
NestedTensorTransformerUtils.cpp
358 output_shape.select(1, -1).fill_(head_dim_v); in sdpa_nested_preprocessing_with_broadcast()
361 output_shape.select(1, 1).fill_(output_num_heads); in sdpa_nested_preprocessing_with_broadcast()
366 output_shape.select(1, 0).fill_(q_t.size(1)); in sdpa_nested_preprocessing_with_broadcast()
367 output_shape.select(1, 1).fill_(output_num_heads); in sdpa_nested_preprocessing_with_broadcast()
368 output_shape.select(1, 2).fill_(head_dim_v); in sdpa_nested_preprocessing_with_broadcast()
443 output_shape.select(1, -1).fill_(head_dim_v); in sdpa_nested_preprocessing()
536 output_shape.select(1, -1).fill_(head_dim_v); in sdpa_nested_preprocessing_backward()
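
In NestedTensorTransformerUtils.cpp, output_shape is a per-component size tensor (one row per nested-tensor component, one column per dimension), and select(dim, index) returns a view of one column, so fill_ overwrites that column in place. The same trick in Python, with made-up sizes:

    import torch

    # 4 components, 3 dims each (hypothetical values)
    output_shape = torch.zeros(4, 3, dtype=torch.int64)

    head_dim_v = 64
    output_shape.select(1, -1).fill_(head_dim_v)   # last column of every row
    output_shape.select(1, 1).fill_(8)             # middle column

    print(output_shape)
    # tensor([[ 0,  8, 64],
    #         [ 0,  8, 64],
    #         [ 0,  8, 64],
    #         [ 0,  8, 64]])
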
/external/pytorch/torch/ao/nn/quantized/modules/
utils.py
48 qw_int_max = torch.clone(qweight.int_repr()).fill_(max_)
49 qw_int_min = torch.clone(qweight.int_repr()).fill_(min_)
/external/cronet/stable/third_party/abseil-cpp/absl/random/internal/
iostream_state_saver.h
56 fill_(os.fill(fill)), in ostream_state_saver()
64 os_.fill(fill_); in ~ostream_state_saver()
71 const fill_type fill_; variable
/external/abseil-cpp/absl/random/internal/
iostream_state_saver.h
56 fill_(os.fill(fill)), in ostream_state_saver()
64 os_.fill(fill_); in ~ostream_state_saver()
71 const fill_type fill_; variable
/external/openscreen/third_party/abseil/src/absl/random/internal/
iostream_state_saver.h
56 fill_(os.fill(fill)), in ostream_state_saver()
64 os_.fill(fill_); in ~ostream_state_saver()
71 const fill_type fill_; variable
/external/cronet/tot/third_party/abseil-cpp/absl/random/internal/
iostream_state_saver.h
56 fill_(os.fill(fill)), in ostream_state_saver()
64 os_.fill(fill_); in ~ostream_state_saver()
71 const fill_type fill_; variable
/external/angle/third_party/abseil-cpp/absl/random/internal/
iostream_state_saver.h
60 fill_(os.fill(fill)), in ostream_state_saver()
68 os_.fill(fill_); in ~ostream_state_saver()
75 const fill_type fill_; variable
/external/rust/android-crates-io/crates/grpcio-sys/grpc/third_party/abseil-cpp/absl/random/internal/
iostream_state_saver.h
56 fill_(os.fill(fill)), in ostream_state_saver()
64 os_.fill(fill_); in ~ostream_state_saver()
71 const fill_type fill_; variable
