/external/pytorch/test/distributed/pipelining/
test_backward.py
    26  x.requires_grad_(True)
    32  ref_x = x.detach().requires_grad_(x.requires_grad)
    65  x.requires_grad_(True)
    71  ref_x = x.detach().requires_grad_(x.requires_grad)
   100  x.requires_grad_(True)
   106  ref_x = x.detach().requires_grad_(x.requires_grad)
   144  x.requires_grad_(True)
   153  ref_inputs.append(x.detach().requires_grad_(x.requires_grad))
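
The recurring pattern in these hits builds a reference input for numerical comparison: `detach()` yields a graph-free alias of the same storage, and the in-place `requires_grad_(...)`, which returns the tensor itself, turns that alias back into an autograd leaf. A minimal sketch of the idiom (the shape is illustrative):

```python
import torch

x = torch.randn(4, 4)
x.requires_grad_(True)

# detach() returns a graph-free alias sharing x's storage;
# requires_grad_() then turns that alias back into an autograd leaf,
# so the reference computation accumulates its own .grad.
ref_x = x.detach().requires_grad_(x.requires_grad)
assert ref_x is not x and ref_x.requires_grad
```

Because the alias shares memory, this is only safe while neither tensor is mutated in place; entries further below that need full isolation use `detach().clone()` instead.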
|
/external/pytorch/torch/csrc/inductor/aoti_eager/
kernel_meta_info.cpp
    15  requires_grad_(src_tensor.requires_grad()) {}    in TensorMetadata()
    31  requires_grad_(requires_grad) {    in TensorMetadata()
    57  requires_grad_,    in build_guard()
    84  other.requires_grad_ /* Should we need to care about grad requirement?*/);    in operator ==()
    90  this->requires_grad_ == other.requires_grad_ &&    in operator ==()
   111  stream << "requires_grad_: " << tensor_metadata.requires_grad_ << '\n';    in operator <<()
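
Here the C++ `TensorMetadata` records `requires_grad_` alongside dtype and shape so that the AOTInductor eager cache can distinguish otherwise identical inputs by their gradient requirement. A hypothetical Python analogue (the class and field names are illustrative, not the real struct layout):

```python
from dataclasses import dataclass

import torch

@dataclass(frozen=True)
class TensorMeta:
    """Hypothetical Python analogue of the C++ TensorMetadata."""
    dtype: torch.dtype
    shape: tuple
    requires_grad: bool

def meta_of(t: torch.Tensor) -> TensorMeta:
    return TensorMeta(t.dtype, tuple(t.shape), t.requires_grad)

a = torch.randn(2, 2)
b = torch.randn(2, 2).requires_grad_(True)
# Equality (and hence any cache keyed on the metadata) separates the two
# tensors purely by their requires_grad flag, mirroring operator== above.
assert meta_of(a) != meta_of(b)
```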
|
/external/pytorch/functorch/examples/maml_regression/
evjang.py
    30  torch.Tensor(40, 1).uniform_(-1.0, 1.0).requires_grad_(),
    31  torch.Tensor(40).zero_().requires_grad_(),
    34  .requires_grad_(),
    35  torch.Tensor(40).zero_().requires_grad_(),
    38  .requires_grad_(),
    39  torch.Tensor(1).zero_().requires_grad_(),

evjang_transforms.py
    31  torch.Tensor(40, 1).uniform_(-1.0, 1.0).requires_grad_(),
    32  torch.Tensor(40).zero_().requires_grad_(),
    35  .requires_grad_(),
    36  torch.Tensor(40).zero_().requires_grad_(),
    39  .requires_grad_(),
    40  torch.Tensor(1).zero_().requires_grad_(),
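
Both MAML examples build their weights as bare leaf tensors rather than `nn.Parameter`s, chaining an in-place initializer with `requires_grad_()`. A condensed sketch (using `torch.empty` in place of the legacy `torch.Tensor(...)` constructor):

```python
import torch

# Each weight is a plain leaf tensor: the in-place initializer runs first
# (no autograd involved yet), then requires_grad_() flips the flag and
# returns the very same tensor, so the whole expression stays one chain.
W0 = torch.empty(40, 1).uniform_(-1.0, 1.0).requires_grad_()
b0 = torch.zeros(40).requires_grad_()
params = [W0, b0]
```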
|
/external/pytorch/test/autograd/
test_complex.py
    11  y = x.detach().requires_grad_(True)
    27  y = x.detach().requires_grad_(True)
    47  y = x.detach().requires_grad_(True)
    78  x.requires_grad_(True)
   101  z1 = z.clone().detach().requires_grad_(True)
|
/external/pytorch/test/
test_mkldnn.py
    45  cpu_tensor.requires_grad_()
   204  root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
   216  root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
   256  x1.requires_grad_()
   257  x2.requires_grad_()
   332  x_ref = x_lower.clone().float().detach().requires_grad_()
   333  x_lower.requires_grad_()
   384  x1.requires_grad_()
   385  x2.requires_grad_()
   498  x.requires_grad_()
    [all …]
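
As lines 204/216 show, `requires_grad_()` also works on opaque MKLDNN tensors: the layout conversion happens before autograd is involved, so the converted tensor is still a leaf. A small sketch, guarded on MKLDNN availability:

```python
import torch

if torch.backends.mkldnn.is_available():
    # to_mkldnn() re-lays-out the data before autograd is involved, so
    # the opaque MKLDNN tensor is still a leaf and the flag can be set.
    root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
    assert root.is_mkldnn and root.requires_grad
```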
|
/external/pytorch/torch/testing/_internal/opinfo/definitions/
sparse.py
   105  return x.detach().clone().requires_grad_(requires_grad)
   159  .requires_grad_(requires_grad),
   176  inp.requires_grad_(requires_grad),
   190  .requires_grad_(requires_grad),
   384  .requires_grad_(requires_grad),
   390  .requires_grad_(requires_grad),
   396  .requires_grad_(requires_grad),
   402  .requires_grad_(requires_grad),
   408  .requires_grad_(requires_grad),
   416  .requires_grad_(requires_grad),
    [all …]
|
/external/pytorch/test/inductor/
test_decompose_mem_bound_mm.py
    89  mat1 = torch.randn(b, m, k, device=GPU_TYPE).requires_grad_(True)
    90  mat2 = torch.randn(b, k, n, device=GPU_TYPE).requires_grad_(True)
   127  input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
   174  input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
   218  mat1 = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
   219  mat2 = torch.randn(k, n, device=GPU_TYPE).requires_grad_(True)
   258  mat1 = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
   259  mat2 = torch.randn(k, n, device=GPU_TYPE).requires_grad_(True)
   294  input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
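
These inductor tests mark every matmul operand as requiring grad so that a single run exercises the decomposition in both the forward and the backward graph. A CPU sketch with illustrative shapes (the tests themselves run on `GPU_TYPE` devices):

```python
import torch

# Marking both operands as requiring grad makes one test cover the
# backward pass as well as the forward one.
mat1 = torch.randn(8, 16).requires_grad_(True)
mat2 = torch.randn(16, 8).requires_grad_(True)
torch.mm(mat1, mat2).sum().backward()
assert mat1.grad.shape == (8, 16) and mat2.grad.shape == (16, 8)
```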
|
/external/pytorch/test/distributions/
test_distributions.py
   178  "concentration1": torch.randn(2, 3).exp().requires_grad_(),
   179  "concentration0": torch.randn(2, 3).exp().requires_grad_(),
   182  "concentration1": torch.randn(4).exp().requires_grad_(),
   183  "concentration0": torch.randn(4).exp().requires_grad_(),
   290  {"df": torch.randn(2, 3).exp().requires_grad_()},
   291  {"df": torch.randn(1).exp().requires_grad_()},
   297  {"df": torch.randn(2, 3).exp().requires_grad_()},
   298  {"df": torch.randn(1).exp().requires_grad_()},
   304  {"concentration": torch.randn(2, 3).exp().requires_grad_()},
   305  {"concentration": torch.randn(4).exp().requires_grad_()},
    [all …]
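
The distributions tests synthesize strictly positive parameters by exponentiating random draws. The order is the interesting part: the `randn` base carries no grad, so `.exp()` produces an unconnected tensor that is still a leaf, and only then is `requires_grad_()` applied. A short sketch:

```python
import torch

# randn -> exp -> requires_grad_: the exp output is a leaf (its input
# carried no grad), so flipping the flag in place is legal.
concentration1 = torch.randn(2, 3).exp().requires_grad_()
concentration0 = torch.randn(2, 3).exp().requires_grad_()
beta = torch.distributions.Beta(concentration1, concentration0)
beta.log_prob(beta.sample()).sum().backward()
assert concentration1.grad is not None
```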
|
test_transforms.py
   224  x = generate_data(transform).requires_grad_()
   236  x = generate_data(transform).requires_grad_()
   338  x = generate_data(transform).requires_grad_()
   349  x = generate_data(transform).requires_grad_()
   355  y = generate_data(transform.inv).requires_grad_()
   366  y = generate_data(transform.inv).requires_grad_()
   372  x = generate_data(transform).requires_grad_()
   384  x = generate_data(transform).requires_grad_()
   566  x = generate_data(transform).requires_grad_()
|
/external/pytorch/benchmarks/functional_autograd_benchmark/
ppl_models.py
    22  beta_value.requires_grad_(True)
    61  nu_value.requires_grad_(True)
    65  sigma_unconstrained_value.requires_grad_(True)
    68  beta_value.requires_grad_(True)
|
/external/pytorch/test/dynamo/
test_pre_dispatch.py
    18  a_test = a_ref.clone().detach().requires_grad_(True)
    41  a_test = a_ref.clone().detach().requires_grad_(True)
    61  a_test = a_ref.clone().detach().requires_grad_(True)

test_aot_autograd_cache.py
   166  a2 = a.detach().clone().requires_grad_(True)
   167  b2 = b.detach().clone().requires_grad_(True)
   214  a2 = a.detach().clone().requires_grad_(True)
   215  b2 = b.detach().clone().requires_grad_(True)
   240  a2 = a.detach().clone().requires_grad_(True)
   241  b2 = b.detach().clone().requires_grad_(True)
   264  a2 = a.detach().clone().requires_grad_(True)
   265  b2 = b.detach().clone().requires_grad_(True)
   283  a2 = a.detach().clone().requires_grad_(True)
   284  b2 = b.detach().clone().requires_grad_(True)
    [all …]
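
Both dynamo suites duplicate their inputs before compiling so that the eager and compiled runs accumulate gradients independently. `a.detach().clone().requires_grad_(True)` and the `clone().detach()` spelling above both produce an independent leaf copy; detaching first merely avoids recording the clone in the autograd graph. A sketch of the idiom:

```python
import torch

a = torch.randn(3, requires_grad=True)

# Independent leaf copy: fresh storage (clone), no autograd history
# (detach), and its own gradient flag.
a2 = a.detach().clone().requires_grad_(True)

a2.mul(2.0).sum().backward()
assert a.grad is None       # the original tensor is unaffected
assert a2.grad is not None
```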
|
/external/pytorch/test/nn/
test_pooling.py
   223  input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
   227  ref_input = input.detach().clone().contiguous().requires_grad_(True)
   249  input = input[:, ::2, :, :].requires_grad_()
   254  ref_input = input.detach().clone().contiguous().requires_grad_(True)
   273  input = input.to(device).to(memory_format=memory_format).requires_grad_()
   276  input2 = input.detach().clone().to(dtype=dtype).requires_grad_(True)
   319  input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
   324  ref_input = input.detach().clone().contiguous().requires_grad_(True)
   344  input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
   347  ref_input = input.detach().clone().contiguous().requires_grad_(True)
    [all …]

test_dropout.py
    34  input_var = input.detach().clone().requires_grad_()
    71  x = torch.randn(5).to(device=current_device).requires_grad_()
    72  x_ref = x.detach().requires_grad_()
   100  input_var = input.clone(memory_format=memory_format).requires_grad_()
   109  input_var = input.clone(memory_format=memory_format).requires_grad_()
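
The pooling and dropout tests pair a strided-layout test input with a contiguous reference copy, setting `requires_grad` on each only after the layout is fixed, so that the layout-changing ops themselves never enter the autograd graph. A condensed sketch with illustrative shapes:

```python
import torch

inp = torch.randn(2, 3, 8, 8)

# Test input in channels_last, reference copy in the default contiguous
# layout; each becomes an autograd leaf only after its layout is settled.
x = inp.contiguous(memory_format=torch.channels_last).requires_grad_()
ref = inp.detach().clone().contiguous().requires_grad_(True)
```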
|
/external/pytorch/test/distributed/fsdp/
test_wrap.py
    75  self.embed_tokens.weight.requires_grad_(False)
    76  self.norm.weight.requires_grad_(False)
    77  self.norm.bias.requires_grad_(False)
    87  self.inp_layernorm.weight.requires_grad_(False)
    88  self.inp_layernorm.bias.requires_grad_(False)
    89  self.post_attn_layernorm.weight.requires_grad_(False)
    90  self.post_attn_layernorm.bias.requires_grad_(False)
   102  self.q_proj.weight.requires_grad_(False)
   103  self.k_proj.weight.requires_grad_(False)
   104  self.v_proj.weight.requires_grad_(False)
    [all …]
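
These FSDP tests freeze individual parameters; `nn.Module.requires_grad_` offers the recursive form, as used by the composable FSDP test further below (`self.layer_no_grad.requires_grad_(False)`). A small sketch of both spellings:

```python
import torch.nn as nn

block = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))

# Per-parameter freezing, as in test_wrap.py above ...
block[1].weight.requires_grad_(False)
block[1].bias.requires_grad_(False)

# ... or recursively via nn.Module.requires_grad_, which applies the
# flag to every parameter the module owns.
block[0].requires_grad_(False)

assert all(not p.requires_grad for p in block.parameters())
```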
|
/external/pytorch/test/distributed/_tensor/
test_convolution_ops.py
    69  inp = input_list[i].to(self.device_type).requires_grad_()
    83  inp = input_list[i].to(self.device_type).requires_grad_()
   139  inp = input_list[i].to(self.device_type).requires_grad_()
   153  inp = input_list[i].to(self.device_type).requires_grad_()
|
/external/pytorch/test/expect/
TestSparseCPU.test_print_coalesced_cpu_float64.expect
    20  # after requires_grad_
    52  # after requires_grad_
    92  # after requires_grad_
   136  # after requires_grad_
   176  # after requires_grad_
   208  # after requires_grad_
   242  # after requires_grad_

TestSparseCPU.test_print_uncoalesced_cpu_float64.expect
    20  # after requires_grad_
    52  # after requires_grad_
    92  # after requires_grad_
   136  # after requires_grad_
   176  # after requires_grad_
   208  # after requires_grad_
   242  # after requires_grad_

TestSparseCUDA.test_print_coalesced_cuda_float64.expect
    21  # after requires_grad_
    57  # after requires_grad_
    98  # after requires_grad_
   144  # after requires_grad_
   188  # after requires_grad_
   224  # after requires_grad_
   259  # after requires_grad_

TestSparseCUDA.test_print_uncoalesced_cuda_float64.expect
    21  # after requires_grad_
    57  # after requires_grad_
    98  # after requires_grad_
   144  # after requires_grad_
   188  # after requires_grad_
   224  # after requires_grad_
   259  # after requires_grad_
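
These `.expect` files snapshot tensor reprs taken before and after `requires_grad_`, since the flag is part of a tensor's printed form. For example:

```python
import torch

t = torch.ones(2)
print(t)            # tensor([1., 1.])
t.requires_grad_()
print(t)            # tensor([1., 1.], requires_grad=True)
```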
|
/external/pytorch/benchmarks/sparse/dlmc/
utils.py
   207  x.requires_grad_(True)
   208  y.requires_grad_(True)
   211  dx.requires_grad_(True)
   212  dy.requires_grad_(True)
|
/external/pytorch/c10/core/
DefaultTensorOptions.h
    27  return requires_grad_;    in requires_grad()
    37  bool requires_grad_ = false; // 8-bit member
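
The `requires_grad_ = false` default here is what makes gradient tracking opt-in at the Python level as well: tensor factory functions accept a `requires_grad` keyword that defaults to `False`. For example:

```python
import torch

a = torch.empty(3)                      # default options: no grad tracking
b = torch.empty(3, requires_grad=True)  # opt in at construction time
assert not a.requires_grad and b.requires_grad
```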
|
/external/pytorch/test/distributed/_composable/fsdp/
test_fully_shard_frozen.py
    67  param.requires_grad_(False)
    86  param.requires_grad_(False)
   176  param.requires_grad_(requires_grad)
   231  self.layer_no_grad.requires_grad_(False)
|
/external/pytorch/aten/src/ATen/native/
VariableMethodStubs.cpp
    51  Tensor& requires_grad_(Tensor& self, bool _requires_grad) {    in requires_grad_() function
    52  self.requires_grad_(_requires_grad);    in requires_grad_()
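
This ATen stub simply forwards to the `Tensor::requires_grad_` method, whose variable implementation enforces the leaf-only restriction: the flag can only be flipped in place on tensors that autograd did not produce. For example:

```python
import torch

x = torch.randn(3, requires_grad=True)
y = x * 2  # non-leaf: produced by an autograd-tracked op
try:
    y.requires_grad_(False)
except RuntimeError as err:
    # PyTorch rejects this: only leaf tensors' requires_grad flag
    # may be changed in place; use y.detach() instead.
    print(err)
```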
|