/external/executorch/backends/qualcomm/tests/ |
D | test_qnn_delegate.py |
    81   sample_input = (torch.randn(5),)
    82   self.lower_module_and_test_output(module, sample_input)
    86   sample_input = (torch.randn(1, 3, 2, 2),)
    87   self.lower_module_and_test_output(module, sample_input)
    91   sample_input = (torch.randn([4, 32, 16, 16]),)
    92   self.lower_module_and_test_output(module, sample_input)
    97   sample_input = (torch.randn([4, 8, 32]), torch.randn([4, 32, 8]))
    98   self.lower_module_and_test_output(module, sample_input)
    102  sample_input = (10 * torch.rand((9, 4, 5, 3)),)
    103  self.lower_module_and_test_output(module, sample_input)
    [all …]
|
/external/pytorch/torch/testing/_internal/opinfo/definitions/ |
D | _masked.py |
    100  for sample_input in sample_inputs_reduction(
    104  sample_input.input.shape, device, **kwargs
    106  sample_input_args, sample_input_kwargs = sample_input.args, dict(
    107  mask=mask, **sample_input.kwargs
    110  sample_input.input.detach().requires_grad_(requires_grad),
    117  and sample_input.input.ndim == 2
    119  and mask.shape == sample_input.input.shape
    122  t = sample_input.input.detach()
    139  for sample_input in sample_inputs_masked_reduction(
    142  mask = sample_input.kwargs.get("mask")
    [all …]
|
D | sparse.py |
    21   sample.sample_input.input,
    22   *sample.sample_input.args,
    23   **sample.sample_input.kwargs,
    108  for sample_input in sample_inputs(
    111  yield sample_input.transform(apply_requires_grad)
    129  for sample_input in sample_inputs_reduction(
    132  if sample_input.input.ndim == 0:
    142  if sample_input.input.ndim < 2:
    146  if sample_input.input.ndim > 2 and (sample_input.input == 0).any():
    157  sample_input.input.detach()
    [all …]
|
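The OpInfo helpers above all consume `SampleInput` objects through the same pattern: the tensor under test is stored in `.input`, extra positional arguments in `.args`, and keyword arguments in `.kwargs`. A minimal sketch of that pattern, assuming the internal `torch.testing._internal.opinfo.core` import path (the reduction op and its arguments are illustrative, not taken from the files above):

```python
import torch
from torch.testing._internal.opinfo.core import SampleInput  # internal API

# Build a sample: the tensor under test goes in .input, extras in .args/.kwargs.
sample_input = SampleInput(
    torch.randn(3, 4),
    args=(1,),                 # e.g. a reduction dim
    kwargs={"keepdim": True},  # keyword args forwarded to the op
)

# The consumption pattern used throughout the files in this listing:
args = [sample_input.input, *sample_input.args]
kwargs = sample_input.kwargs
result = torch.sum(*args, **kwargs)
print(result.shape)  # torch.Size([3, 1])
```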
/external/pytorch/test/mobile/ |
D | test_lite_script_type.py |
    25   sample_input = torch.tensor(5)
    27   script_module_result = script_module(sample_input).a
    36   mobile_module_result = mobile_module(sample_input).a
    88   sample_input = torch.tensor(5)
    91   script_module_result = script_module(sample_input)
    96   mobile_module_result = mobile_module(sample_input)
    112  sample_input = torch.tensor(5)
    114  script_module_result = script_module(sample_input)
    119  mobile_module_result = mobile_module(sample_input)
    129  sample_input = torch.Tensor(1)
    [all …]
|
D | test_bytecode.py |
    363  sample_input = torch.tensor([1])
    366  script_module_result = script_module(sample_input)
|
/external/pytorch/torch/utils/benchmark/utils/ |
D | compile.py |
    42   sample_input: Union[torch.Tensor, Any],
    64   …globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn},
    76   sample_input: Union[torch.Tensor, Any],
    93   compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn)
    95   running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
    110  running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
    121  sample_input: Union[torch.Tensor, Any],
    148  _, eager_time = benchmark_compile(model, sample_input, num_iters, None, None, optimizer)
    165  model, sample_input, num_iters, backend, mode, optimizer, loss_fn)
    180  model, sample_input, num_iters, backend, None, optimizer, loss_fn)
|
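The benchmark helper above pairs a model with a `sample_input`, times one compilation pass, and then times `num_iters` steady-state iterations. A rough usage sketch, assuming `benchmark_compile` is importable from `torch.utils.benchmark.utils.compile` with the positional order suggested by the lines above (model, sample_input, num_iters, backend, mode, optimizer, loss_fn):

```python
import torch
import torch.nn as nn

# Assumption: this import path and argument order match the file above.
from torch.utils.benchmark.utils.compile import benchmark_compile

model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8))
sample_input = torch.randn(32, 64)

# Positional args: (model, sample_input, num_iters, backend); the exact return
# value (compilation time, steady-state running time) is inferred from the
# lines above, not verified.
result = benchmark_compile(model, sample_input, 10, "inductor")
print(result)
```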
/external/pytorch/test/ |
D | test_expanded_weights.py |
    217  def _compare_ew_and_for_loop_per_sample_grads(self, op, sample_input, reduction): argument
    218  input = sample_input.input
    219  args = sample_input.args
    220  kwargs = sample_input.kwargs
    226  sample_input, batch_size, loss_reduction
    268  for sample_input in supported_inputs(op, sample_inputs):
    272  sample_input = SampleInput(
    273  sample_input.args[0],
    274  args=(sample_input.input,),
    275  kwargs=sample_input.kwargs,
    [all …]
|
D | test_masked.py |
    233  for sample_input in sample_inputs_func(device, dtype):
    234  mask = sample_input.kwargs.get('mask')
    236  yield sample_input
    238  if layout == sample_input.input.layout:
    239  yield sample_input
    241  sample_input_kwargs = sample_input.kwargs.copy()
    243  yield SampleInput(sample_input.input.clone(),
    244  args=sample_input.args,
    247  sample_input_kwargs = sample_input.kwargs.copy()
    249  yield SampleInput(sample_input.input.clone(),
    [all …]
|
/external/executorch/exir/tests/ |
D | test_memory_format_ops_pass.py |
    48   sample_input=(torch.randn([3, 4, 5], dtype=torch.float32),),
    59   sample_input=(torch.randn([3, 4, 5, 6], dtype=torch.float32),),
    70   sample_input=(
    87   sample_input=(
    234  sample_input = (
    242  export(toy_model, sample_input),
    263  expected = before_epm.exported_program().module()(*sample_input)
    264  actual = updated_epm.exported_program().module()(*sample_input)
    276  sample_input=(torch.randn(1, 3, 224, 224),),
    291  sample_input=(torch.randn(1, 3, 224, 224),),
    [all …]
|
D | test_memory_format_ops_pass_aten.py |
    31   sample_input=(torch.randn([3, 4, 5], dtype=torch.float32),),
    42   sample_input=(torch.randn([3, 4, 5, 6], dtype=torch.float32),),
    53   sample_input=(
    70   sample_input=(
    88   sample_input=(torch.randn(1, 3, 224, 224),),
    103  sample_input=(torch.randn(1, 3, 224, 224),),
|
D | test_memory_format_ops_pass_utils.py |
    32   sample_input: Tuple[Any, ...]
    72   before = export(test_set.module, test_set.sample_input).run_decompositions({})
    103  expected = before.module()(*test_set.sample_input)
    104  actual = epm.exported_program().module()(*test_set.sample_input)
    125  inputs_flattened = tree_flatten(test_set.sample_input)[0]
|
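The three memory-format test files above share one export-and-compare flow: `torch.export.export(module, sample_input)` produces an `ExportedProgram`, decompositions are applied with `run_decompositions({})`, and the exported module is called with `*sample_input` to compare against the eager output. A minimal sketch of that flow with a toy module (the module itself is illustrative, not taken from the tests):

```python
import torch
from torch.export import export

class ToyModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # A memory-format-sensitive op, similar in spirit to what the tests cover.
        return x.to(memory_format=torch.channels_last) + 1

toy_model = ToyModel()
sample_input = (torch.randn(1, 3, 224, 224),)

# Export against the sample input, then apply decompositions.
before = export(toy_model, sample_input).run_decompositions({})

# The exported program can be called like the original module.
expected = toy_model(*sample_input)
actual = before.module()(*sample_input)
torch.testing.assert_close(actual, expected)
```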
/external/pytorch/test/jit/ |
D | test_save_load_for_op_version.py |
    72   sample_input=st.tuples(
    78   def test_versioned_div_tensor(self, sample_input): argument
    104  for val_a, val_b in product(sample_input, sample_input):
    125  sample_input=st.tuples(
    131  def test_versioned_div_tensor_inplace(self, sample_input): argument
    152  for val_a, val_b in product(sample_input, sample_input):
    175  sample_input=st.tuples(
    181  def test_versioned_div_tensor_out(self, sample_input): argument
    205  for val_a, val_b in product(sample_input, sample_input):
    232  sample_input=st.tuples(
    [all …]
|
D | test_dtype_analysis.py |
    278  sample_input: SampleInput = list(inputs_fn(None, "cpu", dtype, False))[
    281  input_args = [sample_input.input, *sample_input.args]
    339  sample_input = first_sample(self, samples)
    340  input_args = [sample_input.input, *sample_input.args]
    341  expected_res = op(*input_args, **sample_input.kwargs)
    353  traced_fn(sample_input.input, *sample_input.args, **sample_input.kwargs)
    359  v for v in sample_input.kwargs.values() if isinstance(v, torch.Tensor)
|
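The versioned-division tests in test_save_load_for_op_version.py above draw `sample_input` from Hypothesis (`st.tuples(...)`) and then iterate over every `(val_a, val_b)` pair with `product(sample_input, sample_input)`. A small structural sketch of that pattern; the strategy contents and the body are illustrative, since the real tests compare a TorchScript module saved under an old operator version against current eager division:

```python
from itertools import product

from hypothesis import given
from hypothesis import strategies as st

@given(
    sample_input=st.tuples(
        st.floats(min_value=-10.0, max_value=10.0),
        st.floats(min_value=-10.0, max_value=10.0),
    )
)
def test_versioned_div_pairs(sample_input):
    for val_a, val_b in product(sample_input, sample_input):
        if val_b == 0:
            continue
        # The real tests compare the scripted (old-bytecode) result against
        # eager torch.div here; this sketch only exercises the iteration.
        result = val_a / val_b
        assert isinstance(result, float)
```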
/external/pytorch/test/dynamo/ |
D | test_fx_passes_pre_grad.py |
    24   sample_input = torch.randn(4, 4)
    26   m(sample_input)
    27   exported_program = torch.export.export(m, (sample_input,))
    30   pass_execution_and_save(fx_pass, gm, sample_input, "Apply testing pass")
|
/external/executorch/backends/vulkan/_passes/ |
D | test_custom_ops.py |
    104  sample_input = (torch.rand(2, 5, 2, 3), 4, 0.5)
    105  custom_out = model(*sample_input)
    117  expected_out = calculate_expected_output(*sample_input)
|
/external/pytorch/test/jit/xnnpack/ |
D | test_xnnpack_delegate.py |
    36   sample_input = torch.randn(4, 4, 4)
    37   actual_output = scripted_module(sample_input)
    38   expected_output = lowered_module(sample_input)
|
/external/executorch/exir/dialects/edge/spec/ |
D | gen.py |
    16   from executorch.exir.dialects.edge.op.sample_input import SAMPLE_INPUT
    165  sample_input = next(
    174  sample_input = next(
    179  sample_args = [sample_input.input] + list(sample_input.args)
    180  sample_kwargs = sample_input.kwargs
|
/external/pytorch/test/functorch/ |
D | common_utils.py |
    119  def is_valid_inplace_sample_input(sample_input, op, inplace_variant): argument
    122  if sample_input.broadcasts_input:
    124  if not isinstance(sample_input.input, torch.Tensor):
    128  args = (sample_input.input,) + sample_input.args
    129  kwargs = sample_input.kwargs
    131  return sample_input.input.dtype == output_dtype
|
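The `is_valid_inplace_sample_input` helper above filters out samples that an in-place variant of an op cannot accept: samples that broadcast their input, non-tensor inputs, and samples whose output dtype differs from the input dtype. A sketch reconstructed from the lines shown above; how `output_dtype` is computed is an assumption, since that line is not in the excerpt:

```python
import torch

def is_valid_inplace_sample_input(sample_input, op, inplace_variant):
    # In-place ops cannot broadcast their own input or change its dtype.
    if inplace_variant is None:
        return False
    if sample_input.broadcasts_input:
        return False
    if not isinstance(sample_input.input, torch.Tensor):
        return False

    args = (sample_input.input,) + sample_input.args
    kwargs = sample_input.kwargs
    output_dtype = op(*args, **kwargs).dtype  # assumed; not shown in the excerpt
    return sample_input.input.dtype == output_dtype
```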
/external/pytorch/torch/fx/passes/ |
D | net_min_base.py |
    106  sample_input: Tensors,
    124  self.sample_input = sample_input
    148  ShapeProp(self.module).propagate(*self.sample_input)
    155  assert len(placeholders) == len(self.sample_input)
    159  self.a_outputs[name] = sample_input[i]
    160  self.b_outputs[name] = sample_input[i]
    248  main_module(*self.sample_input)
|
D | splitter_base.py |
    316  sample_input: Sequence[Any],
    333  ShapeProp(self.module).propagate(*sample_input)
    337  self.sample_input = sample_input
    568  split_mod, submod, self.sample_input
    897  …submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names)
|
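Both FX pass helpers above use `sample_input` the same way: `ShapeProp(...).propagate(*sample_input)` runs the inputs through the graph so every node is annotated with its output shape and dtype before minimization or splitting, and `sample_input[i]` is mapped to the i-th placeholder. A minimal sketch of that shape-propagation step on a traced module (the module is illustrative):

```python
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.shape_prop import ShapeProp

class TwoLayer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(8, 4)

    def forward(self, x):
        return torch.relu(self.fc(x))

module = symbolic_trace(TwoLayer())
sample_input = (torch.randn(2, 8),)

# Run the sample input through the graph, recording shape/dtype per node.
ShapeProp(module).propagate(*sample_input)

for node in module.graph.nodes:
    meta = node.meta.get("tensor_meta")
    if meta is not None:
        print(node.name, tuple(meta.shape), meta.dtype)
```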
/external/executorch/examples/qualcomm/oss_scripts/ |
D | dino_v2.py |
    55   sample_input = (torch.randn((1, 3, img_size, img_size)),)
    61   sample_input,
|
D | gMLP_image_classification.py |
    49   sample_input = (torch.randn(1, 3, 224, 224),)
    53   sample_input,
|
/external/pytorch/test/distributed/_tensor/ |
D | test_dtensor_ops.py |
    545  for sample_input in samples:
    546  args = [sample_input.input] + list(sample_input.args)
    547  kwargs = sample_input.kwargs
|
/external/executorch/exir/dialects/edge/op/ |
D | TARGETS |
    10   "sample_input.py",
|
/external/pytorch/test/inductor/ |
D | test_torchinductor_opinfo.py |
    721  for sample_input in samples:
    722  args = [sample_input.input] + list(sample_input.args)
    723  kwargs = sample_input.kwargs
    744  "output_process_fn_grad": sample_input.output_process_fn_grad,
|