
Searched refs: sample_input (Results 1–25 of 61, sorted by relevance)


/external/executorch/backends/qualcomm/tests/
test_qnn_delegate.py
81 sample_input = (torch.randn(5),)
82 self.lower_module_and_test_output(module, sample_input)
86 sample_input = (torch.randn(1, 3, 2, 2),)
87 self.lower_module_and_test_output(module, sample_input)
91 sample_input = (torch.randn([4, 32, 16, 16]),)
92 self.lower_module_and_test_output(module, sample_input)
97 sample_input = (torch.randn([4, 8, 32]), torch.randn([4, 32, 8]))
98 self.lower_module_and_test_output(module, sample_input)
102 sample_input = (10 * torch.rand((9, 4, 5, 3)),)
103 self.lower_module_and_test_output(module, sample_input)
[all …]
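
The pattern in these hits is consistent: each test builds a small module, packs its example inputs into a sample_input tuple, and hands both to the class's lower_module_and_test_output harness. A minimal sketch of that shape, with a hypothetical SimpleAdd module standing in for the real test modules:

    import torch

    class SimpleAdd(torch.nn.Module):  # hypothetical stand-in for the modules under test
        def forward(self, x):
            return x + x

    module = SimpleAdd()
    sample_input = (torch.randn(5),)  # example inputs are always packed into a tuple

    # Inside the TestQNN* classes, this lowers the module to the QNN delegate and
    # compares the delegate's output against eager execution:
    # self.lower_module_and_test_output(module, sample_input)
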
/external/pytorch/torch/testing/_internal/opinfo/definitions/
_masked.py
100 for sample_input in sample_inputs_reduction(
104 sample_input.input.shape, device, **kwargs
106 sample_input_args, sample_input_kwargs = sample_input.args, dict(
107 mask=mask, **sample_input.kwargs
110 sample_input.input.detach().requires_grad_(requires_grad),
117 and sample_input.input.ndim == 2
119 and mask.shape == sample_input.input.shape
122 t = sample_input.input.detach()
139 for sample_input in sample_inputs_masked_reduction(
142 mask = sample_input.kwargs.get("mask")
[all …]
sparse.py
21 sample.sample_input.input,
22 *sample.sample_input.args,
23 **sample.sample_input.kwargs,
108 for sample_input in sample_inputs(
111 yield sample_input.transform(apply_requires_grad)
129 for sample_input in sample_inputs_reduction(
132 if sample_input.input.ndim == 0:
142 if sample_input.input.ndim < 2:
146 if sample_input.input.ndim > 2 and (sample_input.input == 0).any():
157 sample_input.input.detach()
[all …]
/external/pytorch/test/mobile/
test_lite_script_type.py
25 sample_input = torch.tensor(5)
27 script_module_result = script_module(sample_input).a
36 mobile_module_result = mobile_module(sample_input).a
88 sample_input = torch.tensor(5)
91 script_module_result = script_module(sample_input)
96 mobile_module_result = mobile_module(sample_input)
112 sample_input = torch.tensor(5)
114 script_module_result = script_module(sample_input)
119 mobile_module_result = mobile_module(sample_input)
129 sample_input = torch.Tensor(1)
[all …]
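
These tests script a module, round-trip it through the lite-interpreter format, and check that both versions agree on the same sample_input. A minimal sketch of that round trip, assuming the private helpers _save_to_buffer_for_lite_interpreter and torch.jit.mobile._load_for_lite_interpreter keep their current names; AddOne is a hypothetical module:

    import io
    import torch
    from torch.jit.mobile import _load_for_lite_interpreter

    class AddOne(torch.nn.Module):  # hypothetical module; the real tests use richer classes
        def forward(self, x):
            return x + 1

    script_module = torch.jit.script(AddOne())
    sample_input = torch.tensor(5)

    # Serialize for the lite interpreter, then reload and compare outputs
    buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
    buffer.seek(0)
    mobile_module = _load_for_lite_interpreter(buffer)

    assert torch.equal(script_module(sample_input), mobile_module(sample_input))
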
test_bytecode.py
363 sample_input = torch.tensor([1])
366 script_module_result = script_module(sample_input)
/external/pytorch/torch/utils/benchmark/utils/
compile.py
42 sample_input: Union[torch.Tensor, Any],
64 …globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn},
76 sample_input: Union[torch.Tensor, Any],
93 compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn)
95 running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
110 running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
121 sample_input: Union[torch.Tensor, Any],
148 _, eager_time = benchmark_compile(model, sample_input, num_iters, None, None, optimizer)
165 model, sample_input, num_iters, backend, mode, optimizer, loss_fn)
180 model, sample_input, num_iters, backend, None, optimizer, loss_fn)
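
The compile.py hits come from torch.utils.benchmark.utils.compile.benchmark_compile, which times a model on a given sample_input with and without torch.compile. A hedged sketch of how it appears to be called, following the positional argument order visible in the hits (model, sample_input, num_iters, backend, mode, ...); the return value and defaults here are assumptions:

    import torch
    from torch.utils.benchmark.utils.compile import benchmark_compile

    model = torch.nn.Linear(16, 16)
    sample_input = torch.randn(8, 16)

    # backend=None times eager execution; a backend string times the compiled model
    _, eager_time = benchmark_compile(model, sample_input, 10, None, None)
    _, compiled_time = benchmark_compile(model, sample_input, 10, "inductor", "default")
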
/external/pytorch/test/
test_expanded_weights.py
217 def _compare_ew_and_for_loop_per_sample_grads(self, op, sample_input, reduction):
218 input = sample_input.input
219 args = sample_input.args
220 kwargs = sample_input.kwargs
226 sample_input, batch_size, loss_reduction
268 for sample_input in supported_inputs(op, sample_inputs):
272 sample_input = SampleInput(
273 sample_input.args[0],
274 args=(sample_input.input,),
275 kwargs=sample_input.kwargs,
[all …]
test_masked.py
233 for sample_input in sample_inputs_func(device, dtype):
234 mask = sample_input.kwargs.get('mask')
236 yield sample_input
238 if layout == sample_input.input.layout:
239 yield sample_input
241 sample_input_kwargs = sample_input.kwargs.copy()
243 yield SampleInput(sample_input.input.clone(),
244 args=sample_input.args,
247 sample_input_kwargs = sample_input.kwargs.copy()
249 yield SampleInput(sample_input.input.clone(),
[all …]
/external/executorch/exir/tests/
test_memory_format_ops_pass.py
48 sample_input=(torch.randn([3, 4, 5], dtype=torch.float32),),
59 sample_input=(torch.randn([3, 4, 5, 6], dtype=torch.float32),),
70 sample_input=(
87 sample_input=(
234 sample_input = (
242 export(toy_model, sample_input),
263 expected = before_epm.exported_program().module()(*sample_input)
264 actual = updated_epm.exported_program().module()(*sample_input)
276 sample_input=(torch.randn(1, 3, 224, 224),),
291 sample_input=(torch.randn(1, 3, 224, 224),),
[all …]
test_memory_format_ops_pass_aten.py
31 sample_input=(torch.randn([3, 4, 5], dtype=torch.float32),),
42 sample_input=(torch.randn([3, 4, 5, 6], dtype=torch.float32),),
53 sample_input=(
70 sample_input=(
88 sample_input=(torch.randn(1, 3, 224, 224),),
103 sample_input=(torch.randn(1, 3, 224, 224),),
test_memory_format_ops_pass_utils.py
32 sample_input: Tuple[Any, ...]
72 before = export(test_set.module, test_set.sample_input).run_decompositions({})
103 expected = before.module()(*test_set.sample_input)
104 actual = epm.exported_program().module()(*test_set.sample_input)
125 inputs_flattened = tree_flatten(test_set.sample_input)[0]
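
test_memory_format_ops_pass_utils.py drives each test case through torch.export: the module is exported with its sample_input, decompositions are run, and the exported program's module is called on the same inputs to compare against eager. A small sketch of that flow, with a hypothetical SmallConv module in place of the test sets:

    import torch
    from torch.export import export

    class SmallConv(torch.nn.Module):  # hypothetical module for illustration
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 8, 3)

        def forward(self, x):
            return self.conv(x).relu()

    model = SmallConv()
    sample_input = (torch.randn(1, 3, 224, 224),)

    ep = export(model, sample_input).run_decompositions({})
    expected = model(*sample_input)
    actual = ep.module()(*sample_input)
    assert torch.allclose(expected, actual, atol=1e-6)
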
/external/pytorch/test/jit/
test_save_load_for_op_version.py
72 sample_input=st.tuples(
78 def test_versioned_div_tensor(self, sample_input):
104 for val_a, val_b in product(sample_input, sample_input):
125 sample_input=st.tuples(
131 def test_versioned_div_tensor_inplace(self, sample_input):
152 for val_a, val_b in product(sample_input, sample_input):
175 sample_input=st.tuples(
181 def test_versioned_div_tensor_out(self, sample_input):
205 for val_a, val_b in product(sample_input, sample_input):
232 sample_input=st.tuples(
[all …]
test_dtype_analysis.py
278 sample_input: SampleInput = list(inputs_fn(None, "cpu", dtype, False))[
281 input_args = [sample_input.input, *sample_input.args]
339 sample_input = first_sample(self, samples)
340 input_args = [sample_input.input, *sample_input.args]
341 expected_res = op(*input_args, **sample_input.kwargs)
353 traced_fn(sample_input.input, *sample_input.args, **sample_input.kwargs)
359 v for v in sample_input.kwargs.values() if isinstance(v, torch.Tensor)
/external/pytorch/test/dynamo/
test_fx_passes_pre_grad.py
24 sample_input = torch.randn(4, 4)
26 m(sample_input)
27 exported_program = torch.export.export(m, (sample_input,))
30 pass_execution_and_save(fx_pass, gm, sample_input, "Apply testing pass")
/external/executorch/backends/vulkan/_passes/
test_custom_ops.py
104 sample_input = (torch.rand(2, 5, 2, 3), 4, 0.5)
105 custom_out = model(*sample_input)
117 expected_out = calculate_expected_output(*sample_input)
/external/pytorch/test/jit/xnnpack/
test_xnnpack_delegate.py
36 sample_input = torch.randn(4, 4, 4)
37 actual_output = scripted_module(sample_input)
38 expected_output = lowered_module(sample_input)
/external/executorch/exir/dialects/edge/spec/
gen.py
16 from executorch.exir.dialects.edge.op.sample_input import SAMPLE_INPUT
165 sample_input = next(
174 sample_input = next(
179 sample_args = [sample_input.input] + list(sample_input.args)
180 sample_kwargs = sample_input.kwargs
/external/pytorch/test/functorch/
common_utils.py
119 def is_valid_inplace_sample_input(sample_input, op, inplace_variant):
122 if sample_input.broadcasts_input:
124 if not isinstance(sample_input.input, torch.Tensor):
128 args = (sample_input.input,) + sample_input.args
129 kwargs = sample_input.kwargs
131 return sample_input.input.dtype == output_dtype
/external/pytorch/torch/fx/passes/
net_min_base.py
106 sample_input: Tensors,
124 self.sample_input = sample_input
148 ShapeProp(self.module).propagate(*self.sample_input)
155 assert len(placeholders) == len(self.sample_input)
159 self.a_outputs[name] = sample_input[i]
160 self.b_outputs[name] = sample_input[i]
248 main_module(*self.sample_input)
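
net_min_base.py uses sample_input to seed FX shape propagation: ShapeProp(self.module).propagate(*self.sample_input) runs the traced module on the sample inputs and records each node's output metadata. A standalone sketch of that mechanism on a hypothetical TwoLayer module:

    import torch
    from torch.fx import symbolic_trace
    from torch.fx.passes.shape_prop import ShapeProp

    class TwoLayer(torch.nn.Module):  # hypothetical module for illustration
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(4, 4)

        def forward(self, x):
            return self.lin(x).relu()

    gm = symbolic_trace(TwoLayer())
    sample_input = torch.randn(2, 4)
    ShapeProp(gm).propagate(sample_input)  # annotates nodes with node.meta["tensor_meta"]

    for node in gm.graph.nodes:
        meta = node.meta.get("tensor_meta")
        if meta is not None:
            print(node.name, tuple(meta.shape), meta.dtype)
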
splitter_base.py
316 sample_input: Sequence[Any],
333 ShapeProp(self.module).propagate(*sample_input)
337 self.sample_input = sample_input
568 split_mod, submod, self.sample_input
897 …submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names)
/external/executorch/examples/qualcomm/oss_scripts/
dino_v2.py
55 sample_input = (torch.randn((1, 3, img_size, img_size)),)
61 sample_input,
gMLP_image_classification.py
49 sample_input = (torch.randn(1, 3, 224, 224),)
53 sample_input,
/external/pytorch/test/distributed/_tensor/
test_dtensor_ops.py
545 for sample_input in samples:
546 args = [sample_input.input] + list(sample_input.args)
547 kwargs = sample_input.kwargs
/external/executorch/exir/dialects/edge/op/
TARGETS
10 "sample_input.py",
/external/pytorch/test/inductor/
test_torchinductor_opinfo.py
721 for sample_input in samples:
722 args = [sample_input.input] + list(sample_input.args)
723 kwargs = sample_input.kwargs
744 "output_process_fn_grad": sample_input.output_process_fn_grad,
