/external/pytorch/test/inductor/

test_auto_functionalize.py
    46   out.copy_(x)
    87   o.copy_(all_gather_output)
    407  … copy_: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg2_1, arg2_1); arg2_1 = copy_ = None
    408  … copy__1: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg5_1, arg5_1); arg5_1 = copy__1 = None
    419  copy_: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg1_1, arg1_1); arg1_1 = copy_ = None
    420  … copy__1: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg4_1, arg4_1); arg4_1 = copy__1 = None
    511  copy_: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg1_1, arg1_1); arg1_1 = copy_ = None
    512  … copy__1: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg4_1, arg4_1); arg4_1 = copy__1 = None
    571  …copy_: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg1_1, getitem_2); arg1_1 = getitem_2 = co…
    572  …copy__1: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg2_1, getitem_1); arg2_1 = getitem_1 = …
    [all …]
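
The `copy_ = torch.ops.aten.copy_.default(argN_1, …)` nodes above are the functionalization epilogue: the traced graph body is mutation-free, and writes to graph inputs are re-applied as final `copy_` calls. A minimal sketch of how such a trace arises (using plain `torch.func.functionalize` rather than the test's custom-op setup):

```python
import torch
from torch.func import functionalize
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    x.add_(1)      # mutates a graph input
    return x.sin()

# The body is traced functionally; the graph ends with
# torch.ops.aten.copy_.default(arg0_1, add) writing the result back.
gm = make_fx(functionalize(f))(torch.randn(3))
print(gm.graph)
```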
|
s429861_repro.py
    4442  copy_: "f32[50][1]cuda:0" = torch.ops.aten.copy_.default(arg1_1, getitem_7031)
    4444  copy__1: "f32[23][1]cuda:0" = torch.ops.aten.copy_.default(arg2_1, getitem_7032)
    4446  copy__2: "f32[38][1]cuda:0" = torch.ops.aten.copy_.default(arg3_1, getitem_7033)
    4448  copy__3: "f32[5][1]cuda:0" = torch.ops.aten.copy_.default(arg4_1, getitem_7034)
    4450  copy__4: "f32[100][1]cuda:0" = torch.ops.aten.copy_.default(arg5_1, getitem_7035)
    4452  copy__5: "f32[50][1]cuda:0" = torch.ops.aten.copy_.default(arg6_1, getitem_7036)
    4454  copy__6: "f32[77][1]cuda:0" = torch.ops.aten.copy_.default(arg7_1, getitem_7037)
    4456  copy__7: "f32[100][1]cuda:0" = torch.ops.aten.copy_.default(arg8_1, getitem_7038)
    4458  copy__8: "f32[100][1]cuda:0" = torch.ops.aten.copy_.default(arg9_1, getitem_7039)
    4460  copy__9: "f32[96][1]cuda:0" = torch.ops.aten.copy_.default(arg10_1, getitem_7040)
    [all …]
|
test_inplacing_pass.py
    39   result.copy_(x.sin())
    44   out_sin.copy_(x.sin())
    45   out_cos.copy_(x.cos())
    206  copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1)
    229  copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1)
    253  copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1)
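
The `result.copy_(x.sin())` matches come from custom ops that fill caller-provided buffers, which the inplacing pass then tries to turn back into true in-place writes. A hedged sketch of that pattern (the `mylib::` namespace and op name are hypothetical):

```python
import torch
from torch.library import custom_op

@custom_op("mylib::sin_out", mutates_args={"result"})
def sin_out(x: torch.Tensor, result: torch.Tensor) -> None:
    result.copy_(x.sin())  # write into the preallocated output

x = torch.randn(3)
out = torch.empty(3)
sin_out(x, out)            # out now holds sin(x)
```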
|
test_distributed_patterns.py
    30   … # torch.ops.fsdp.set_ doesn't work well in eager mode, so use the slow copy_ path instead.
    37   mod.unsharded_weight.copy_(all_gather(mod.sharded_weight))
    62   … # torch.ops.fsdp.set_ doesn't work well in eager mode, so use the slow copy_ path instead.
    69   mod.unsharded_weight.copy_(all_gather(mod.sharded_weight))
    199  out.copy_(y.cos())
    225  w.copy_(x + 1)
    249  w.copy_(x)
    269  w.copy_(x + 1)
    287  w.copy_(x)
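
The `unsharded_weight.copy_(all_gather(...))` lines implement the "slow copy_ path" the comments mention: the all-gathered full weight is written into a preallocated unsharded buffer in place. A minimal single-process sketch, where `all_gather` is a hypothetical stand-in for the real collective:

```python
import torch
from torch import nn

def all_gather(shard: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in: pretend two ranks hold identical shards.
    return torch.cat([shard, shard])

class ShardedModule(nn.Module):
    def __init__(self, shard: torch.Tensor):
        super().__init__()
        self.sharded_weight = nn.Parameter(shard)
        self.register_buffer("unsharded_weight", torch.empty(2 * shard.numel()))

mod = ShardedModule(torch.randn(4))
with torch.no_grad():
    # Slow path: materialize the full weight via an in-place copy
    # instead of swapping storages with torch.ops.fsdp.set_.
    mod.unsharded_weight.copy_(all_gather(mod.sharded_weight))
```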
|
/external/pytorch/aten/src/ATen/native/cuda/

ScanKernels.cpp
    38   values.copy_(*values_); in cummax_helper_cuda()
    41   indices.copy_(*indices_); in cummax_helper_cuda()
    55   values.copy_(*values_); in cummin_helper_cuda()
    58   indices.copy_(*indices_); in cummin_helper_cuda()
    81   result.copy_(*result_); in _logcumsumexp_out_cuda()
    100  result.copy_(*result_); in cumsum_cuda_kernel()
    108  result.copy_(*result_); in cumprod_cuda_kernel()
|
Sort.cpp
    34   t.copy_(rangeview); in fillSliceWithIndex()
    78   values.copy_(self); in sort_cuda_kernel()
    93   self_.copy_(self); in sort_cuda_kernel()
    115  values.copy_(*values_tmp); in sort_cuda_kernel()
    118  indices.copy_(*indices_tmp); in sort_cuda_kernel()
|
TensorTopK.cpp
    30   values.copy_(sorted_values.narrow(dim, 0, k)); in topk_out_with_sort()
    31   indices.copy_(sorted_indices.narrow(dim, 0, k)); in topk_out_with_sort()
    90   indices.copy_(indices.gather(dim, sortedIndices)); in TORCH_IMPL_FUNC()
    91   values.copy_(sortedValues); in TORCH_IMPL_FUNC()
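
`topk_out_with_sort` falls back to a full sort and copies the leading `k` slice into the output tensors. A Python sketch of the same strategy:

```python
import torch

def topk_with_sort(x: torch.Tensor, k: int, dim: int = -1):
    # Sort the whole dimension, then copy_ the first k entries into
    # freshly allocated outputs, mirroring the CUDA fallback above.
    sorted_values, sorted_indices = torch.sort(x, dim=dim, descending=True)
    values = torch.empty_like(sorted_values.narrow(dim, 0, k))
    indices = torch.empty_like(sorted_indices.narrow(dim, 0, k))
    values.copy_(sorted_values.narrow(dim, 0, k))
    indices.copy_(sorted_indices.narrow(dim, 0, k))
    return values, indices

v, i = topk_with_sort(torch.randn(4, 16), k=3)
```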
|
Copy.cu
    145  AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { in direct_copy_kernel_cuda()
    151  TORCH_CHECK(dtype == iter.dtype(1), "copy_() does not support casting " in direct_copy_kernel_cuda()
    153  AT_DISPATCH_BIT_TYPES(dtype, "copy_", [&] { in direct_copy_kernel_cuda()
    158  dtype, "copy_", AT_WRAP([&] { in direct_copy_kernel_cuda()
    328  dst_contig.copy_(src_contig, non_blocking); in copy_kernel_cuda()
    333  dst.copy_(dst_contig, non_blocking); in copy_kernel_cuda()
    354  TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); in copy_kernel_cuda()
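
Lines 328 and 333 show the staging strategy in `copy_kernel_cuda` for copies whose layouts or devices don't line up: route through contiguous temporaries, then copy back into the strided destination. A device-agnostic sketch of that idea:

```python
import torch

def staged_copy(dst: torch.Tensor, src: torch.Tensor, non_blocking: bool = False):
    # Make the source contiguous, stage into a contiguous destination
    # buffer if needed, then copy back into the real (strided) dst.
    src_contig = src.expand_as(dst).contiguous()
    dst_contig = (
        dst
        if dst.is_contiguous()
        else torch.empty_like(dst, memory_format=torch.contiguous_format)
    )
    dst_contig.copy_(src_contig, non_blocking)
    if dst_contig is not dst:
        dst.copy_(dst_contig, non_blocking)
    return dst

dst = torch.empty(4, 4).t()          # non-contiguous destination
staged_copy(dst, torch.randn(4, 4))
```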
|
/external/pytorch/test/

test_functionalization.py
    70   inpt.copy_(inpt_new)
    187  y.copy_(x)
    298  copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = copy_ = None
    319  copy_ = torch.ops.aten.copy_.default(arg0_1, view_1); arg0_1 = view_1 = copy_ = None
    513  copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = copy_ = None
    532  copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = copy_ = None
    561  copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_5); arg0_1 = getitem_5 = copy_ = None
    585  copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = copy_ = None
    605  copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = copy_ = None
    739  copy_ = torch.ops.aten.copy_.default(arg0_1, diagonal_scatter); arg0_1 = copy_ = None
    [all …]
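
These traces show the two epilogue shapes: straight-line mutations end with `copy_(arg0_1, add)`, while mutations through views are first rewritten into `view_copy`/`*_scatter` ops before the write-back. A small sketch that reproduces the view case:

```python
import torch
from torch.func import functionalize
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    y = x.view(2, 2)
    y.add_(1)      # mutate the input through a view
    return y

# With views removed too, the mutation becomes functional
# view_copy/add ops, and the graph ends with copy_(arg0_1, ...)
# re-applying the input mutation.
gm = make_fx(functionalize(f, remove="mutations_and_views"))(torch.zeros(4))
print(gm.graph)
```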
|
/external/executorch/examples/models/llama/source_transformation/

quantized_kv_cache.py
    125  narrowed_k.copy_(quantized_k_val)
    126  narrowed_k_scales.copy_(k_scales)
    127  narrowed_k_zp.copy_(k_zero_points)
    135  narrowed_v.copy_(quantized_v_val)
    136  narrowed_v_scales.copy_(v_scales)
    137  narrowed_v_zp.copy_(v_zero_points)
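
These matches are all the same narrow-then-`copy_` idiom: take a view of the cache at the current position and write the new quantized values through it. A simplified sketch (the `[batch, seq, head_dim]` layout here is an assumption for illustration):

```python
import torch

def update_cache(cache: torch.Tensor, new_vals: torch.Tensor, input_pos: int) -> None:
    # narrow() returns a view, so copy_ writes straight into `cache`.
    seq_len = new_vals.size(1)
    narrowed = cache.narrow(1, input_pos, seq_len)
    narrowed.copy_(new_vals)

k_cache = torch.zeros(1, 128, 8)
update_cache(k_cache, torch.randn(1, 4, 8), input_pos=10)
```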
|
/external/pytorch/aten/src/ATen/native/

Copy.cpp
    88   _AT_DISPATCH_CP_TYPES(self.scalar_type(), "copy_", [&] { in copy_same_type_transpose_()
    130  // (e.g. XLA) may be supported by overriding copy_ and _copy_from.
    221  // cpu_tensor.copy_(xla_tensor) => xla_tensor._copy_from(cpu_tensor) in copy_impl()
    222  // xla_tensor.copy_(cpu_tensor) => cpu_tensor._copy_from(xla_tensor) in copy_impl()
    247  return vulkan::ops::copy_(self, src); in copy_impl()
    317  r.copy_(src, non_blocking); in copy_meta()
    323  …// copy() is the "functional" form of copy_(). It exists so we can properly functionalize copy_(),… in copy()
    329  // that copy_() will fully overwrite all data with that of src in copy()
    335  r.copy_(src, non_blocking); in copy()
    353  Tensor& copy_(Tensor& self, const Tensor& src, bool non_blocking) { in copy_() function
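
Line 323 notes that `copy()` is the functional form of `copy_()`: it returns a fresh tensor whose contents are fully overwritten by `src`, which is what lets functionalization replace the mutating op. A quick sketch of the two side by side:

```python
import torch

self_t = torch.zeros(3)
src = torch.arange(3.0)

out = torch.ops.aten.copy.default(self_t, src)  # functional: self_t untouched
self_t.copy_(src)                               # in-place: self_t mutated
assert torch.equal(out, self_t)
```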
|
AffineGridGenerator.cpp
    39   base_grid.select(-1, 0).copy_(linspace_from_neg_one(theta, W, align_corners)); in make_base_grid_4D()
    40   base_grid.select(-1, 1).copy_(linspace_from_neg_one(theta, H, align_corners).unsqueeze_(-1)); in make_base_grid_4D()
    56   base_grid.select(-1, 0).copy_(linspace_from_neg_one(theta, W, align_corners)); in make_base_grid_5D()
    57   base_grid.select(-1, 1).copy_(linspace_from_neg_one(theta, H, align_corners).unsqueeze_(-1)); in make_base_grid_5D()
    58   …base_grid.select(-1, 2).copy_(linspace_from_neg_one(theta, D, align_corners).unsqueeze_(-1).unsque… in make_base_grid_5D()
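
The base grid is built channel by channel: each `select(-1, i)` is a view of one coordinate plane, filled in place from a linspace over [-1, 1], with broadcasting supplying the other dimensions. A sketch of the 4-D case (this simplified `linspace_from_neg_one` approximates the ATen helper):

```python
import torch

def linspace_from_neg_one(steps: int, align_corners: bool) -> torch.Tensor:
    grid = torch.linspace(-1, 1, steps)
    if not align_corners and steps > 1:
        grid = grid * (steps - 1) / steps
    return grid

N, H, W = 1, 4, 5
base_grid = torch.empty(N, H, W, 3)
base_grid.select(-1, 0).copy_(linspace_from_neg_one(W, True))                 # x, broadcast over rows
base_grid.select(-1, 1).copy_(linspace_from_neg_one(H, True).unsqueeze(-1))   # y, broadcast over cols
base_grid.select(-1, 2).fill_(1)                                              # homogeneous coordinate
```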
|
Fill.cpp
    42   // Trust the `copy_` to handle the quantization and the boundary checks. in fill_out_quantized()
    43   self.copy_(out); in fill_out_quantized()
    63   self.copy_(value.clone()); in fill_()
    65   self.copy_(value); in fill_()
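
`fill_` defers to `copy_` with a 0-dim value tensor, which gives it broadcasting, dtype casting, and (for quantized tensors) quantization and bounds checks for free. A small illustration:

```python
import torch

self_t = torch.empty(2, 3, dtype=torch.int32)
value = torch.tensor(7.0)   # 0-dim tensor, different dtype
self_t.copy_(value)         # broadcasts and casts, same effect as self_t.fill_(7)
```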
|
/external/pytorch/torch/ao/quantization/fx/_model_report/

model_report_observer.py
    101  self.epoch_activation_min.copy_(epoch_min_val)
    102  self.epoch_activation_max.copy_(epoch_max_val)
    147  self.min_val.copy_(min_val)
    148  self.max_val.copy_(max_val)
    247  self.percentile_batches_tracked.copy_(new_number_of_batches)
    248  self.average_percentile_ratio.copy_(new_ratios)
    249  self.constant_channels.copy_(new_constant_count)
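
The observer updates its statistics through `copy_` so the registered buffers keep their identity (state_dict entry, device placement) across updates. A minimal sketch of that buffer-update pattern:

```python
import torch
from torch import nn

class MinMaxTracker(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("min_val", torch.tensor(float("inf")))
        self.register_buffer("max_val", torch.tensor(float("-inf")))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # copy_ mutates the existing buffers instead of rebinding them.
        self.min_val.copy_(torch.minimum(self.min_val, x.min()))
        self.max_val.copy_(torch.maximum(self.max_val, x.max()))
        return x

tracker = MinMaxTracker()
tracker(torch.randn(8))
```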
|
/external/pytorch/aten/src/ATen/test/

undefined_tensor_test.cpp
    55   // copy_ in TEST()
    57   ASSERT_ANY_THROW(und.copy_(und)); in TEST()
    59   ASSERT_ANY_THROW(und.copy_(ft)); in TEST()
    61   ASSERT_ANY_THROW(ft.copy_(und)); in TEST()
|
/external/pytorch/torch/_inductor/fx_passes/

reinplace.py
    76   tmp.copy_(src)
    145  slice2.copy_(src)
    159  graph_call_function(graph, aten.copy_.default, tmp, src)
    196  user.target is aten.copy_.default and user.args[0] is inp for user in node.users
    232  tmp.copy_(src)
    382  be inplaced if the above condition is true and there's a copy_ in the
    391  # maps argument to the first copy_ node that mutates it.
    399  if node.target == aten.copy_.default and node.args[0].op in (
    437  # Ignore uses after the copy_ epilogue node, where the input
    446  # mutated_arg.copy_(other)
    [all …]
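
Line 391's bookkeeping maps each mutated graph input to the first `copy_` node that writes it back, so later uses can be checked against the epilogue. A sketch of that scan over a functionalized FX graph:

```python
import torch
from torch.func import functionalize
from torch.fx.experimental.proxy_tensor import make_fx

aten = torch.ops.aten

def f(x):
    x.mul_(2)
    return x + 1

gm = make_fx(functionalize(f))(torch.randn(3))

# Map each placeholder to the first copy_ epilogue node mutating it.
copy_args_to_copy_nodes = {}
for node in gm.graph.nodes:
    if node.target is aten.copy_.default and node.args[0].op == "placeholder":
        copy_args_to_copy_nodes.setdefault(node.args[0], node)
```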
|
README.md
    10   …acing and before Inductor, we have no mutation in our graph, except for a copy_ epilogue at the en…
    23   In addition, AOTDispatch can introduce a copy_ epilogue into the graph. For example, we may have a …
    27   x.copy_(y)
|
/external/google-cloud-java/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/

ModelSourceInfo.java
    333  private boolean copy_ = false; field in ModelSourceInfo
    349  return copy_; in getCopy()
    372  if (copy_ != false) { in writeTo()
    373  output.writeBool(2, copy_); in writeTo()
    390  if (copy_ != false) { in getSerializedSize()
    391  size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, copy_); in getSerializedSize()
    566  copy_ = false; in clear()
    607  result.copy_ = copy_; in buildPartial0()
    696  copy_ = input.readBool(); in mergeFrom()
    812  private boolean copy_; field in ModelSourceInfo.Builder
    [all …]
|
/external/google-cloud-java/java-aiplatform/proto-google-cloud-aiplatform-v1beta1/src/main/java/com/google/cloud/aiplatform/v1beta1/

ModelSourceInfo.java
    335  private boolean copy_ = false; field in ModelSourceInfo
    351  return copy_; in getCopy()
    374  if (copy_ != false) { in writeTo()
    375  output.writeBool(2, copy_); in writeTo()
    392  if (copy_ != false) { in getSerializedSize()
    393  size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, copy_); in getSerializedSize()
    568  copy_ = false; in clear()
    609  result.copy_ = copy_; in buildPartial0()
    699  copy_ = input.readBool(); in mergeFrom()
    821  private boolean copy_; field in ModelSourceInfo.Builder
    [all …]
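
Both generated classes back the same proto field, a `bool copy = 2` on `ModelSourceInfo`, serialized with `writeBool(2, copy_)`. Assuming the Python bindings of the `google-cloud-aiplatform` package expose the same message (an assumption, not shown in these matches), usage would look like:

```python
# Hypothetical usage via the Python protobuf bindings.
from google.cloud.aiplatform_v1.types import ModelSourceInfo

info = ModelSourceInfo(copy=True)  # the field the Java `copy_` member stores
print(info.copy)                   # True; serialized as bool field number 2
```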
|
/external/pytorch/torch/optim/

rprop.py
    266  sign.copy_(torch.where(sign.gt(0), etaplus, sign))
    267  sign.copy_(torch.where(sign.lt(0), etaminus, sign))
    268  sign.copy_(torch.where(sign.eq(0), 1, sign))
    281  grad.copy_(torch.where(sign.eq(etaminus), 0, grad))
    287  prev.copy_(grad)
    368  sign.copy_(torch.where(sign.gt(0), etaplus, sign))
    369  sign.copy_(torch.where(sign.lt(0), etaminus, sign))
    370  sign.copy_(torch.where(sign.eq(0), 1, sign))
    386  grouped_grads[i].copy_(
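
The three `torch.where` rewrites classify each parameter by the sign of `grad * prev_grad` and replace it with the step-size multiplier, reusing `sign` as scratch space; where the sign flipped, the gradient is zeroed before the update. A standalone sketch of lines 266-287:

```python
import torch

etaplus, etaminus = 1.2, 0.5
grad, prev = torch.randn(4), torch.randn(4)

sign = grad.mul(prev).sign()
sign.copy_(torch.where(sign.gt(0), etaplus, sign))   # same sign -> grow step
sign.copy_(torch.where(sign.lt(0), etaminus, sign))  # sign flip -> shrink step
sign.copy_(torch.where(sign.eq(0), 1, sign))         # zero -> keep step

grad.copy_(torch.where(sign.eq(etaminus), 0, grad))  # skip update after a flip
prev.copy_(grad)                                     # remember for next step
```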
|
/external/pytorch/torch/onnx/_internal/fx/passes/

functionalization.py
    54   a.copy_(new_a)
    136  """Remove `aten.copy_.default` nodes that mutate module inputs.
    139  ``Functionalization`` pass adds `aten.copy_.default` nodes to the graph
    148  and node.target == torch.ops.aten.copy_.default
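
The pass at line 148 matches `aten.copy_.default` nodes whose first argument is a module input and drops them, since ONNX export cannot represent input mutation. A sketch of that removal over an FX graph:

```python
import torch

aten = torch.ops.aten

def remove_input_mutations(graph: torch.fx.Graph) -> None:
    # Drop copy_ nodes that write back into placeholders (module inputs).
    for node in list(graph.nodes):
        if (
            node.op == "call_function"
            and node.target is aten.copy_.default
            and node.args[0].op == "placeholder"
        ):
            node.replace_all_uses_with(node.args[0])
            graph.erase_node(node)
```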
|
/external/pytorch/torch/csrc/autograd/

autograd_meta.cpp
    29   // foo.copy_(bar)
    33   // view.copy_(bar)
    37   // foo.copy_(bar)
    67   // view.copy_(bar)
    73   // base.copy_(bar)
    242  new_fw_grad_value.copy_(new_grad); in set_fw_grad()
    262  res.copy_(new_grad); in set_fw_grad()
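
The comments sketch how forward-mode gradients flow through `copy_`: copying a dual tensor also copies its tangent (lines 242 and 262 do the actual writes in `set_fw_grad`). A small sketch of the user-visible behavior, assuming standard forward AD:

```python
import torch
import torch.autograd.forward_ad as fwAD

with fwAD.dual_level():
    foo = torch.zeros(3)
    bar = fwAD.make_dual(torch.randn(3), torch.ones(3))
    foo.copy_(bar)  # copies the primal and propagates bar's tangent
    primal, tangent = fwAD.unpack_dual(foo)
    print(tangent)  # tensor([1., 1., 1.])
```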
|
VariableTypeManual.cpp
    190  Tensor& copy_( in copy_() function
    210  at::redispatch::copy_( in copy_()
    222  new_fw_grad = self_fw_grad.copy_(src_fw_grad); in copy_()
    362  "copy_", in TORCH_LIBRARY_IMPL()
    363  torch::dispatch(DispatchKey::Autograd, TORCH_FN(VariableType::copy_))); in TORCH_LIBRARY_IMPL()
    384  static Tensor& copy_( in copy_() function
    391  at::redispatch::copy_( in copy_()
    536  "copy_", in TORCH_LIBRARY_IMPL()
    538  DispatchKey::ADInplaceOrView, TORCH_FN(ADInplaceOrView::copy_))); in TORCH_LIBRARY_IMPL()
|
/external/executorch/kernels/portable/cpu/

op_copy.cpp
    64   Tensor& copy_( in copy_() function
    80   static constexpr const char op_name[] = "copy_"; in copy_()
    82   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "copy_", CTYPE, [&]() { in copy_()
|
/external/pytorch/torch/testing/_internal/distributed/

multi_threaded_pg.py
    77   output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
    92   output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_(
    139  data[src_rank][i].copy_(res.to(data[src_rank][i].device))
    153  dest_tensor.copy_(src_tensor)
    172  dest_tensor.copy_(src_in_tensors[rank])
    189  dest_tensor.copy_(src_in_tensor_list[0])
    210  dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device))
    230  out_tensor_list[j].copy_(in_tensor_list[j])
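
In this single-process test backend every "rank" is just a tensor slot, so each collective reduces to a `copy_` between slots. A sketch of broadcast in that style:

```python
import torch

world_size, src_rank = 4, 0
tensors = [torch.empty(3) for _ in range(world_size)]
tensors[src_rank] = torch.randn(3)

# broadcast: every other rank copies the source rank's tensor in place.
for rank in range(world_size):
    if rank != src_rank:
        tensors[rank].copy_(tensors[src_rank])
```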
|