/external/pytorch/test/inductor/test_b2b_gemm.py
    21: def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    25: def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    55: def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    59: def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    81: def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    84: def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    106: def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    109: def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    130: def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    151: def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    [all …]

/external/pytorch/torch/_inductor/decomposition.py
    126: def assert_async_msg_decomp(tensor: torch.Tensor, msg: str) -> None:
    132: def functional_assert_async_msg_decomp(tensor: torch.Tensor, msg: str) -> None:
    149: x: torch.Tensor,
    191: grad_output: torch.Tensor,
    192: input: torch.Tensor,
    193: weight: torch.Tensor,
    223: def round_dec(x: torch.Tensor, decimals: int = 0) -> torch.Tensor:
    231: self: torch.Tensor,
    232: batch2: torch.Tensor,
    254: self: torch.Tensor,
    [all …]

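Of these matches, round_dec has self-contained semantics: Inductor decomposes rounding to a number of decimal places into plain tensor ops. A minimal sketch of the usual scale-round-unscale trick (not necessarily the exact Inductor code):

    import torch

    def round_dec(x: torch.Tensor, decimals: int = 0) -> torch.Tensor:
        # Shift the decimal point, round to the nearest integer, shift back.
        ten_pow = 10.0 ** decimals
        return torch.round(x * ten_pow) / ten_pow

    round_dec(torch.tensor([1.2345, -0.567]), 2)  # tensor([ 1.2300, -0.5700])
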
/external/executorch/backends/cadence/aot/ops_registrations.py
    264: input: torch.Tensor,
    276: input: torch.Tensor,
    288: src: torch.Tensor,
    289: weight: torch.Tensor,
    290: bias: torch.Tensor,
    292: weight_zero_point: torch.Tensor,
    293: out_multiplier: torch.Tensor,
    294: out_shift: torch.Tensor,
    296: offset: Optional[torch.Tensor],
    310: src: torch.Tensor,
    [all …]

/external/pytorch/torch/distributed/_symmetric_memory/__init__.py
    137: shard: torch.Tensor,
    138: shard_consumer: Callable[[torch.Tensor, int], None],
    139: ag_out: torch.Tensor,
    195: chunk_producer: Callable[[int, torch.Tensor], None],
    196: output: torch.Tensor,
    289: A_shard: torch.Tensor,
    290: Bs: List[torch.Tensor],
    319: def unflatten(t: torch.Tensor) -> torch.Tensor:
    333: def shard_consumer(shard: torch.Tensor, rank: int) -> None:
    348: A_shard: torch.Tensor,
    [all …]

/external/pytorch/torch/ao/quantization/fx/_decomposed.py
    52: input: torch.Tensor,
    89: input: torch.Tensor,
    114: input: torch.Tensor,
    115: scale: torch.Tensor,
    116: zero_point: torch.Tensor,
    139: input: torch.Tensor,
    140: scale: torch.Tensor,
    141: zero_point: torch.Tensor,
    171: input: torch.Tensor,
    172: scale: torch.Tensor,
    [all …]

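These are the decomposed quantization ops (quantize_per_tensor and friends) that PT2 export uses in place of fused quantized kernels. The core of each is plain affine quantization; a sketch assuming the usual clamp(round(x / scale) + zero_point) definition:

    import torch

    def quantize_per_tensor(
        input: torch.Tensor,
        scale: float,
        zero_point: int,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype,
    ) -> torch.Tensor:
        # Affine quantization: q = clamp(round(x / scale) + zp, qmin, qmax).
        q = torch.round(input / scale) + zero_point
        return torch.clamp(q, quant_min, quant_max).to(dtype)

    x = torch.randn(4)
    q = quantize_per_tensor(x, 0.1, 0, quant_min=-128, quant_max=127, dtype=torch.int8)
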
/external/pytorch/torch/_decomp/decompositions_for_jvp.py
    102: def trace(self: Tensor) -> Tensor:
    107: def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
    118: input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
    133: grad_out: Tensor,
    134: input: Tensor,
    136: mean: Tensor,
    137: rstd: Tensor,
    138: weight: Optional[Tensor],
    139: bias: Optional[Tensor],
    217: grad_out: Tensor,
    [all …]

/external/pytorch/torch/_decomp/decompositions.py
    106: def _unsqueeze_to_dim(x: Tensor, dim: int) -> Tensor:
    115: def tanh_backward(out_grad: Tensor, y: Tensor):
    122: def sigmoid_backward(out_grad: Tensor, y: Tensor):
    129: def softplus_backward(out_grad: Tensor, x: Tensor, beta: float, threshold: float):
    138: grad_output: Tensor,
    143: self_or_result: Tensor,
    168: def fill_tensor(self, value: Tensor):
    179: def hardsigmoid(self: Tensor) -> Tensor:
    186: def hardsigmoid_backward(grad_output: Tensor, self: Tensor):
    197: grad_output: Tensor, self: Tensor, min_val: float, max_val: float
    [all …]

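tanh_backward and sigmoid_backward fall straight out of the derivatives of the forward outputs; a sketch of the algebra these decompositions encode, for real-valued tensors:

    import torch
    from torch import Tensor

    def tanh_backward(out_grad: Tensor, y: Tensor) -> Tensor:
        # y = tanh(x), and d tanh/dx = 1 - y^2.
        return out_grad * (1 - y * y)

    def sigmoid_backward(out_grad: Tensor, y: Tensor) -> Tensor:
        # y = sigmoid(x), and d sigmoid/dx = y * (1 - y).
        return out_grad * y * (1 - y)
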
/external/executorch/examples/qualcomm/oss_scripts/llama2/model/static_llama.py
    19: def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    34: x: torch.Tensor, freqs_cos: torch.Tensor, freqs_sin: torch.Tensor
    102: hidden_states: torch.Tensor,
    103: freqs_cos: torch.Tensor,
    104: freqs_sin: torch.Tensor,
    105: atten_mask: torch.Tensor,
    106: k_caches: List[torch.Tensor],
    107: v_caches: List[torch.Tensor],
    138: hidden_states: torch.Tensor,
    139: freqs_cos: torch.Tensor,
    [all …]

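repeat_kv is the grouped-query-attention helper that tiles each KV head so every query head has a matching key/value head. A sketch of the common expand-and-reshape implementation, assuming the (batch, seq, n_kv_heads, head_dim) layout these llama exports use:

    import torch

    def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
        if n_rep == 1:
            return hidden_states
        bsz, seq_len, n_kv_heads, head_dim = hidden_states.shape
        # expand() adds a zero-copy repeat axis; reshape() folds it into the head axis.
        return (
            hidden_states[:, :, :, None, :]
            .expand(bsz, seq_len, n_kv_heads, n_rep, head_dim)
            .reshape(bsz, seq_len, n_kv_heads * n_rep, head_dim)
        )
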
/external/pytorch/benchmarks/fastrnns/cells.py
    29: input: Tensor,
    30: hidden: Tuple[Tensor, Tensor],
    31: w_ih: Tensor,
    32: w_hh: Tensor,
    33: b_ih: Tensor,
    34: b_hh: Tensor,
    53: input: Tensor,
    54: hx: Tensor,
    55: cx: Tensor,
    56: w_ih: Tensor,
    [all …]

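These cells are the hand-written TorchScript baselines, and the (w_ih, w_hh, b_ih, b_hh) signatures are the textbook fused-gate LSTM. A sketch of the standard cell the benchmark implements:

    import torch
    from torch import Tensor
    from typing import Tuple

    def lstm_cell(
        input: Tensor, hx: Tensor, cx: Tensor,
        w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        # One matmul per weight matrix yields all four gates at once.
        gates = torch.mm(input, w_ih.t()) + b_ih + torch.mm(hx, w_hh.t()) + b_hh
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
        cy = torch.sigmoid(forgetgate) * cx + torch.sigmoid(ingate) * torch.tanh(cellgate)
        hy = torch.sigmoid(outgate) * torch.tanh(cy)
        return hy, cy
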
/external/pytorch/torch/nn/functional.py
    437: input: Tensor,
    442: _random_samples: Optional[Tensor] = None,
    514: input: Tensor,
    519: _random_samples: Optional[Tensor] = None,
    549: input: Tensor,
    554: _random_samples: Optional[Tensor] = None,
    630: input: Tensor,
    635: _random_samples: Optional[Tensor] = None,
    665: input: Tensor,
    718: input: Tensor,
    [all …]

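The repeated input/_random_samples pairs are the typed overloads of fractional max pooling. Usage is straightforward: pass either a fixed output_size or an output_ratio.

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 32, 32)
    # Pool down to an exact spatial size...
    out, idx = F.fractional_max_pool2d(x, kernel_size=3, output_size=(16, 16), return_indices=True)
    # ...or to a fraction of the input size.
    out2 = F.fractional_max_pool2d(x, kernel_size=3, output_ratio=(0.5, 0.5))
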
/external/pytorch/torch/optim/adam.py
    36: lr: Union[float, Tensor] = 1e-3,
    321: params: List[Tensor],
    322: grads: List[Tensor],
    323: exp_avgs: List[Tensor],
    324: exp_avg_sqs: List[Tensor],
    325: max_exp_avg_sqs: List[Tensor],
    326: state_steps: List[Tensor],
    327: grad_scale: Optional[Tensor],
    328: found_inf: Optional[Tensor],
    440: params: List[Tensor],
    [all …]

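Note the lr: Union[float, Tensor] constructor parameter: recent PyTorch lets the learning rate be a scalar tensor, which keeps the optimizer step graph-capturable. Typical usage:

    import torch

    model = torch.nn.Linear(10, 1)
    # lr can be a plain float, or a scalar tensor when the step will be
    # compiled or CUDA-graph captured.
    opt = torch.optim.Adam(model.parameters(), lr=torch.tensor(1e-3))

    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    opt.step()
    opt.zero_grad()
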
/external/pytorch/torch/optim/adamw.py
    36: lr: Union[float, Tensor] = 1e-3,
    318: params: List[Tensor],
    319: grads: List[Tensor],
    320: exp_avgs: List[Tensor],
    321: exp_avg_sqs: List[Tensor],
    322: max_exp_avg_sqs: List[Tensor],
    323: state_steps: List[Tensor],
    324: grad_scale: Optional[Tensor],
    325: found_inf: Optional[Tensor],
    330: lr: Union[Tensor, float],
    [all …]

/external/pytorch/torch/optim/_adafactor.py
    25: lr: Union[float, Tensor] = 1e-2,
    327: params: List[Tensor],
    328: grads: List[Tensor],
    333: row_vars: List[Optional[Tensor]],
    334: col_vars: List[Optional[Tensor]],
    335: variances: List[Optional[Tensor]],
    336: state_steps: List[Tensor],
    337: grad_scale: Optional[Tensor],
    338: found_inf: Optional[Tensor],
    341: lr: Union[Tensor, float],
    [all …]

/external/pytorch/torch/optim/adadelta.py
    31: lr: Union[float, Tensor] = 1.0,
    86: params_with_grad: List[Tensor],
    87: grads: List[Tensor],
    88: square_avgs: List[Tensor],
    89: acc_deltas: List[Tensor],
    90: state_steps: List[Tensor],
    246: params: List[Tensor],
    247: grads: List[Tensor],
    248: square_avgs: List[Tensor],
    249: acc_deltas: List[Tensor],
    [all …]

/external/pytorch/torch/distributed/tensor/experimental/_attention.py
    69: def _maybe_wait(tensor: torch.Tensor) -> torch.Tensor:
    89: def _merge_one(self, block_out: torch.Tensor, block_lse: torch.Tensor) -> None:
    103: def step(self, out: torch.Tensor, lse: torch.Tensor) -> None:
    122: query: torch.Tensor,
    123: key: torch.Tensor,
    124: value: torch.Tensor,
    148: query: torch.Tensor,
    149: key: torch.Tensor,
    150: value: torch.Tensor,
    151: attn_bias: Optional[torch.Tensor] = None,
    [all …]

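_merge_one and step combine per-block attention results as key blocks rotate through the ring. Assuming the standard online-softmax merge, where each block carries its output plus a per-row log-sum-exp, the math looks like this sketch:

    import torch

    def merge_blocks(out_a, lse_a, out_b, lse_b):
        # Merge two attention results computed over disjoint key blocks.
        # out_*: (batch, heads, seq, head_dim); lse_*: (batch, heads, seq).
        lse = torch.logaddexp(lse_a, lse_b)
        w_a = torch.exp(lse_a - lse).unsqueeze(-1)
        w_b = torch.exp(lse_b - lse).unsqueeze(-1)
        return w_a * out_a + w_b * out_b, lse
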
/external/pytorch/torch/ao/quantization/experimental/quantizer.py
    23: alpha: torch.Tensor,
    24: gamma: torch.Tensor,
    25: quantization_levels: torch.Tensor,
    26: level_indices: torch.Tensor,
    42: def quantize(self, tensor2quantize: Tensor):
    97: def quant_dequant(self, tensor2quantize: Tensor) -> Tensor:
    121: tensor2quantize: Tensor,
    122: alpha: Tensor,
    123: gamma: Tensor,
    124: quantization_levels: Tensor,
    [all …]

/external/pytorch/torch/masked/_ops.py
    400: def _reduction_identity(op_name: str, input: Tensor, *args):
    485: def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple):
    494: def _any(input: Tensor, dim: tuple, keepdim: bool):
    503: def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    617: mask_input: Tensor,
    737: mask_input: Tensor,
    821: def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    829: def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    865: def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor:
    950: def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:
    [all …]

/external/pytorch/torch/_higher_order_ops/flex_attention.py
    49: def _permute_strides(out: torch.Tensor, query_strides: Tuple[int, ...]) -> torch.Tensor:
    94: query: torch.Tensor,
    95: key: torch.Tensor,
    96: value: torch.Tensor,
    131: query: torch.Tensor,
    132: key: torch.Tensor,
    133: value: torch.Tensor,
    134: out: torch.Tensor,
    135: logsumexp: torch.Tensor,
    136: grad_out: torch.Tensor,
    [all …]

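This higher-order op backs the public torch.nn.attention.flex_attention API (PyTorch 2.5+), where a user-supplied score_mod rewrites attention scores. A usage sketch with a causal score_mod:

    import torch
    from torch.nn.attention.flex_attention import flex_attention

    q = torch.randn(2, 8, 128, 64)  # (batch, heads, seq, head_dim)
    k = torch.randn(2, 8, 128, 64)
    v = torch.randn(2, 8, 128, 64)

    def causal(score, b, h, q_idx, kv_idx):
        # Send scores for future positions to -inf before the softmax.
        return torch.where(q_idx >= kv_idx, score, float("-inf"))

    out = flex_attention(q, k, v, score_mod=causal)
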
/external/pytorch/torch/nn/modules/transformer.py
    47: def _get_seq_len(src: Tensor, batch_first: bool) -> Optional[int]:
    107: activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
    174: src: Tensor,
    175: tgt: Tensor,
    176: src_mask: Optional[Tensor] = None,
    177: tgt_mask: Optional[Tensor] = None,
    178: memory_mask: Optional[Tensor] = None,
    179: src_key_padding_mask: Optional[Tensor] = None,
    180: tgt_key_padding_mask: Optional[Tensor] = None,
    181: memory_key_padding_mask: Optional[Tensor] = None,
    [all …]

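The src/tgt/mask block is nn.Transformer.forward, which takes separate attention and key-padding masks for source, target, and memory. Minimal usage:

    import torch
    import torch.nn as nn

    model = nn.Transformer(d_model=512, nhead=8, batch_first=True)
    src = torch.randn(2, 10, 512)  # (batch, src seq, d_model)
    tgt = torch.randn(2, 20, 512)  # (batch, tgt seq, d_model)
    # Causal mask: each target position attends only to earlier positions.
    tgt_mask = nn.Transformer.generate_square_subsequent_mask(20)
    out = model(src, tgt, tgt_mask=tgt_mask)  # (2, 20, 512)
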
/external/pytorch/torch/ao/nn/quantized/modules/functional_modules.py
    51: def add(self, x: Tensor, y: Tensor) -> Tensor:
    58: def add_scalar(self, x: Tensor, y: float) -> Tensor:
    66: def mul(self, x: Tensor, y: Tensor) -> Tensor:
    73: def mul_scalar(self, x: Tensor, y: float) -> Tensor:
    81: def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
    88: def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
    96: def matmul(self, x: Tensor, y: Tensor) -> Tensor:
    123: def add(self, x: Tensor, y: Tensor) -> Tensor:
    129: def add_scalar(self, x: Tensor, y: float) -> Tensor:
    135: def mul(self, x: Tensor, y: Tensor) -> Tensor:
    [all …]

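These are the arithmetic methods of FloatFunctional and its quantized counterpart QFunctional. Using the module wrapper instead of the bare operator is what lets quantization observers see, and later replace, the op:

    import torch
    from torch.ao.nn.quantized import FloatFunctional

    class Residual(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.ff = FloatFunctional()  # carries an observer for the add

        def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            # Becomes a fused quantized add_relu after convert().
            return self.ff.add_relu(x, y)
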
/external/executorch/examples/models/llama/source_transformation/sdpa.py
    42: input_pos: torch.Tensor,
    43: q: torch.Tensor,
    44: k: torch.Tensor,
    45: v: torch.Tensor,
    125: input_pos: torch.Tensor,
    126: q: torch.Tensor,
    127: k: torch.Tensor,
    128: v: torch.Tensor,
    151: def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    183: input_pos: torch.Tensor,
    [all …]

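These source transforms swap the model's attention for export-friendly variants built on F.scaled_dot_product_attention. The underlying call, for reference:

    import torch
    import torch.nn.functional as F

    q = torch.randn(1, 8, 16, 64)  # (batch, heads, seq, head_dim)
    k = torch.randn(1, 8, 16, 64)
    v = torch.randn(1, 8, 16, 64)
    # is_causal=True applies the lower-triangular mask internally.
    out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
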
/external/executorch/backends/arm/test/ops/test_mul.py
    61: input_: torch.Tensor,
    62: other_: torch.Tensor,
    67: self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor]
    88: self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor]
    113: test_data: tuple[torch.Tensor, torch.Tensor],
    135: input_: torch.Tensor,
    136: other_: torch.Tensor,
    145: input_: torch.Tensor,
    146: other_: torch.Tensor,
    156: input_: torch.Tensor,
    [all …]

/external/pytorch/test/jit/test_await.py
    34: def fn(x: Tensor):
    49: def fn(x: Tensor):
    65: def __init__(self, a: Tensor, b: Tensor):
    72: def fn(x: Tensor):
    89: def __init__(self, a: Tensor, b: Tensor):
    101: def fn(x: Tensor):
    120: def __init__(self, a: Tensor, b: Tensor):
    131: def fn(x: Tensor):
    149: def __init__(self, a: Tensor, b: Tensor):
    163: def fn(x: Tensor):
    [all …]

/external/pytorch/torch/ao/quantization/pt2e/qat_utils.py
    88: x: torch.Tensor,
    89: conv_weight: torch.Tensor,
    90: conv_bias: torch.Tensor,
    91: bn_weight: torch.Tensor,
    92: bn_bias: torch.Tensor,
    93: bn_running_mean: torch.Tensor,
    94: bn_running_var: torch.Tensor,
    108: x: torch.Tensor,
    109: conv_weight: torch.Tensor,
    110: conv_bias: torch.Tensor,
    [all …]

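The conv_weight/bn_running_var groupings are the QAT conv+batchnorm fusion patterns. The fold itself is the standard rescaling of the conv weight by the batchnorm statistics; a sketch of that math (not the exact qat_utils code, which rewrites FX graphs):

    import torch

    def fold_conv_bn(
        conv_w: torch.Tensor, conv_b: torch.Tensor,
        bn_rm: torch.Tensor, bn_rv: torch.Tensor,
        bn_w: torch.Tensor, bn_b: torch.Tensor,
        eps: float = 1e-5,
    ):
        # Scale each conv output channel by gamma / sqrt(var + eps),
        # then fold the running mean and beta into the bias.
        scale = bn_w / torch.sqrt(bn_rv + eps)
        fused_w = conv_w * scale.reshape(-1, 1, 1, 1)
        fused_b = (conv_b - bn_rm) * scale + bn_b
        return fused_w, fused_b
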
/external/pytorch/torch/_inductor/fx_passes/pad_mm.py
    63: def get_alignment_size(x: Tensor) -> int:
    76: def check_device(a: Tensor, b: Tensor) -> bool:
    80: def check_dtype(a: Tensor, b: Tensor) -> bool:
    85: mat1: Tensor, mat2: Tensor, input: Optional[Tensor] = None
    89: def valid_shape_and_stride(t: Optional[Tensor]) -> bool:
    131: def pad_dim(x: Tensor, padded_length: int, dim: int) -> Tensor:
    139: input: Tensor, mat1: Tensor, mat2: Tensor, beta: float, alpha: float
    152: input: Optional[Tensor],
    153: mat1: Tensor,
    154: mat2: Tensor,
    [all …]

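pad_mm pads matmul operands so their sizes hit hardware-friendly alignment, and pad_dim is the building block. A sketch assuming padded_length is the target size of the dimension:

    import torch
    import torch.nn.functional as F

    def pad_dim(x: torch.Tensor, padded_length: int, dim: int) -> torch.Tensor:
        # Zero-pad `dim` at the end until its size is `padded_length`.
        pad_amount = padded_length - x.size(dim)
        if pad_amount <= 0:
            return x
        # F.pad takes (before, after) pairs starting from the last dim.
        pad = [0, 0] * (x.dim() - dim - 1) + [0, pad_amount]
        return F.pad(x, pad)

    pad_dim(torch.randn(5, 13), 16, dim=1).shape  # torch.Size([5, 16])
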