• Home
  • Raw
  • Download

Lines Matching full:torch

4 While most of the torch API and handling for ``__torch_function__`` happens
5 at the C++ level, some of the torch API is written in Python so we need
8 has_torch_function. See torch/functional.py and test/test_overrides.py
33 import torch
34 from torch._C import (
64 module: str = "torch",
105 A tuple of functions that are publicly available in the torch API but cannot
111 >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
113 >>> torch.add in torch.overrides.get_ignored_functions()
116 Tensor = torch.Tensor
118 torch.typename,
119 torch.is_tensor,
120 torch.is_storage,
121 torch.set_default_tensor_type,
122 torch.set_default_device,
123 torch.get_default_device,
124 torch.set_rng_state,
125 torch.get_rng_state,
126 torch.manual_seed,
127 torch.initial_seed,
128 torch.seed,
129 torch.save,
130 torch.load,
131 torch.set_printoptions,
132 torch.fork,
133 torch.get_default_dtype,
134 torch.get_num_interop_threads,
135 torch.get_num_threads,
136 torch.init_num_threads,
137 torch.import_ir_module,
138 torch.import_ir_module_from_buffer,
139 torch.is_anomaly_enabled,
140 torch.is_anomaly_check_nan_enabled,
141 torch.is_grad_enabled,
142 torch.merge_type_from_type_comment,
143 torch.parse_ir,
144 torch.parse_schema,
145 torch.parse_type_comment,
146 torch.set_anomaly_enabled,
147 torch.set_flush_denormal,
148 torch.set_num_interop_threads,
149 torch.set_num_threads,
150 torch.wait,
151 torch.as_tensor,
152 torch.from_numpy,
153 torch.get_device,
154 torch.tensor,
155 torch.default_generator,
156 torch.has_cuda,
157 torch.has_cudnn,
158 torch.has_lapack,
159 torch.device,
160 torch.dtype,
161 torch.finfo,
162 torch.has_mkl,
163 torch.has_mps,
164 torch.has_mkldnn,
165 torch.has_openmp,
166 torch.iinfo,
167 torch.memory_format,
168 torch.qscheme,
169 torch.set_grad_enabled,
170 torch.no_grad,
171 torch.enable_grad,
172 torch.inference_mode,
173 torch.is_inference_mode_enabled,
174 torch.layout,
175 torch.align_tensors,
176 torch.arange,
177 torch.as_strided,
178 torch.bartlett_window,
179 torch.blackman_window,
180 torch.broadcast_shapes,
181 torch.can_cast,
182 torch.compile,
183 torch.cudnn_affine_grid_generator,
184 torch.cudnn_batch_norm,
185 torch.cudnn_convolution,
186 torch.cudnn_convolution_transpose,
187 torch.cudnn_convolution_relu,
188 torch.cudnn_convolution_add_relu,
189 torch.cudnn_grid_sampler,
190 torch.cudnn_is_acceptable,
191 torch.empty,
192 torch.empty_permuted,
193 torch.empty_strided,
194 torch.empty_quantized,
195 torch.export.export,
196 torch.export.load,
197 torch.export.register_dataclass,
198 torch.export.save,
199 torch.eye,
200 torch.fft.fftfreq,
201 torch.fft.rfftfreq,
202 torch.from_file,
203 torch.full,
204 torch.fill,
205 torch.hamming_window,
206 torch.hann_window,
207 torch.kaiser_window,
208 torch.linspace,
209 torch.logspace,
210 torch.mkldnn_adaptive_avg_pool2d,
211 torch.mkldnn_convolution,
212 torch.mkldnn_max_pool2d,
213 torch.mkldnn_max_pool3d,
214 torch.mkldnn_linear_backward_weights,
215 torch.mkldnn_rnn_layer,
216 torch.normal,
217 torch.ones,
218 torch.promote_types,
219 torch.rand,
220 torch.randn,
221 torch.randint,
222 torch.randperm,
223 torch.range,
224 torch.result_type,
225 torch.scalar_tensor,
226 torch.sparse_coo_tensor,
227 torch.sparse_compressed_tensor,
228 torch.sparse_csr_tensor,
229 torch.sparse_csc_tensor,
230 torch.sparse_bsr_tensor,
231 torch.sparse_bsc_tensor,
232 torch.sym_constrain_range,
233 torch.sym_constrain_range_for_size,
234 torch.tril_indices,
235 torch.triu_indices,
236 torch.vander,
237 torch.zeros,
238 torch._jit_internal.boolean_dispatch,
239 torch.nn.functional.assert_int_or_pair,
240 torch.nn.functional.upsample,
241 torch.nn.functional.upsample_bilinear,
242 torch.nn.functional.upsample_nearest,
243 torch.nn.functional.has_torch_function,
244 torch.nn.functional.has_torch_function_unary,
245 torch.nn.functional.has_torch_function_variadic,
246 torch.nn.functional.handle_torch_function,
247 torch.nn.functional.sigmoid,
248 torch.nn.functional.hardsigmoid,
249 torch.nn.functional.tanh,
250 torch.nn.functional._canonical_mask,
251 torch.nn.functional._none_or_dtype,
253 torch.nn.init.calculate_gain,
255 torch.nn.init.uniform,
256 torch.nn.init.normal,
257 torch.nn.init.constant,
258 torch.nn.init.eye,
259 torch.nn.init.dirac,
260 torch.nn.init.xavier_uniform,
261 torch.nn.init.xavier_normal,
262 torch.nn.init.kaiming_uniform,
263 torch.nn.init.kaiming_normal,
264 torch.nn.init.orthogonal,
265 torch.nn.init.sparse,
266 torch.nested.to_padded_tensor,
269 torch.set_autocast_enabled,
270 torch.is_autocast_enabled,
271 torch.set_autocast_dtype,
272 torch.get_autocast_dtype,
273 torch.clear_autocast_cache,
274 torch.set_autocast_cpu_enabled,
275 torch.is_autocast_cpu_enabled,
276 torch.set_autocast_xla_enabled,
277 torch.is_autocast_xla_enabled,
278 torch.set_autocast_ipu_enabled,
279 torch.is_autocast_ipu_enabled,
280 torch.set_autocast_cpu_dtype,
281 torch.get_autocast_cpu_dtype,
282 torch.set_autocast_ipu_dtype,
283 torch.get_autocast_ipu_dtype,
284 torch.get_autocast_gpu_dtype,
285 torch.set_autocast_gpu_dtype,
286 torch.get_autocast_xla_dtype,
287 torch.set_autocast_xla_dtype,
288 torch.autocast_increment_nesting,
289 torch.autocast_decrement_nesting,
290 torch.is_autocast_cache_enabled,
291 torch.set_autocast_cache_enabled,
292 torch.nn.functional.hardswish,
293 torch.is_vulkan_available,
294 torch.are_deterministic_algorithms_enabled,
295 torch.use_deterministic_algorithms,
296 torch.is_deterministic_algorithms_warn_only_enabled,
297 torch.set_deterministic_debug_mode,
298 torch.get_device_module,
299 torch.get_deterministic_debug_mode,
300 torch.set_float32_matmul_precision,
301 torch.get_float32_matmul_precision,
302 torch.unify_type_list,
303 torch.is_warn_always_enabled,
304 torch.set_warn_always,
305 torch.vitals_enabled,
306 torch.set_vital,
307 torch.read_vitals,
308 torch.vmap,
309 torch.cond,
310 torch.frombuffer,
311 torch.asarray,
312 torch._functional_sym_constrain_range,
313 torch._make_dep_token,
394 Tensor = torch.Tensor
418 >>> my_add = torch.overrides.get_testing_overrides()[torch.add]
429 Tensor = torch.Tensor
431 torch.abs: lambda input, out=None: -1,
432 torch.absolute: lambda input, out=None: -1,
433 torch.adaptive_avg_pool1d: lambda input, output_size: -1,
434 torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
435 torch.acos: lambda input, out=None: -1,
436 torch.adjoint: lambda input: -1,
437 torch.arccos: lambda input, out=None: -1,
438 torch.acosh: lambda input, out=None: -1,
439 torch.arccosh: lambda input, out=None: -1,
440 torch.add: lambda input, other, out=None: -1,
441 torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
442 torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
443 torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
444 torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
445 torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
446 torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
447 torch.affine_grid_generator: lambda theta, size, align_corners: -1,
448 torch.all: lambda input, dim=None: -1,
449 torch.allclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
450 torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
451 torch.amax: lambda input, dim=None: -1,
452 torch.amin: lambda input, dim=None: -1,
453 torch.aminmax: lambda input, dim=None, keepdim=False, out=None: -1,
454 torch.angle: lambda input, out=None: -1,
455 torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
456 torch.argmax: lambda input: -1,
457 torch.argmin: lambda input: -1,
458 torch.argsort: lambda input, dim=None: -1,
459 torch.asin: lambda input, out=None: -1,
460 torch._assert_async: lambda input, msg: -1,
461 torch.arcsin: lambda input, out=None: -1,
462 torch.asinh: lambda input, out=None: -1,
463 torch.arcsinh: lambda input, out=None: -1,
464 torch.atan: lambda input, out=None: -1,
465 torch.arctan: lambda input, out=None: -1,
466 torch.atan2: lambda input, other, out=None: -1,
467 torch.arctan2: lambda input, other, out=None: -1,
468 torch.atanh: lambda input, out=None: -1,
469 torch.arctanh: lambda input, out=None: -1,
470 torch.atleast_1d: lambda *tensors: -1,
471 torch.atleast_2d: lambda *tensors: -1,
472 torch.atleast_3d: lambda *tensors: -1,
473torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_includ…
474 torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
475torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, …
476torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu,…
477torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g,…
478 torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
479torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, ep…
480torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, …
481 torch.batch_norm_stats: lambda input, eps: -1,
482 torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
483 torch.bernoulli: lambda input, generator=None, out=None: -1,
484 torch.bilinear: lambda input1, input2, weight, bias: -1,
485 torch.binary_cross_entropy_with_logits: (
488 torch.bincount: lambda input, weights=None, minlength=0: -1,
489 torch.binomial: lambda count, prob, generator=None: -1,
490 torch.bitwise_and: lambda input, other, out=None: -1,
491 torch.bitwise_not: lambda input, out=None: -1,
492 torch.bitwise_or: lambda input, other, out=None: -1,
493 torch.bitwise_xor: lambda input, other, out=None: -1,
494 torch.bitwise_left_shift: lambda input, other, out=None: -1,
495 torch.bitwise_right_shift: lambda input, other, out=None: -1,
496 torch.block_diag: lambda *tensors: -1,
497 torch.bmm: lambda input, mat2, out=None: -1,
498 torch.broadcast_tensors: lambda *tensors: -1,
499 torch.broadcast_to: lambda self, size: -1,
500 torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
501 torch.cartesian_prod: lambda *tensors: -1,
502 torch.cat: lambda tensors, dim=0, out=None: -1,
503 torch.concat: lambda tensors, dim=0, out=None: -1, # alias for torch.cat
504 torch.concatenate: lambda tensors, dim=0, out=None: -1, # alias for torch.cat
505 torch.cdist: lambda x1, x2, p=2.0, compute_mode="use_mm_for_euclid_dist_if_necessary": -1,
506 torch.ceil: lambda input, out=None: -1,
507 torch.celu: lambda input, alpha=1.0, inplace=False: -1,
508 torch.chain_matmul: lambda *matrices, out=None: -1,
509 torch.channel_shuffle: lambda input, groups: -1,
510 torch.cholesky: lambda input, upper=False, out=None: -1,
511 torch.linalg.cholesky: lambda input, out=None: -1,
512 torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
513 torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
514 torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
515 torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
516 torch.chunk: lambda input, chunks, dim=0: -1,
517 torch.clamp: lambda input, min=None, max=None, out=None: -1,
518 torch.clip: lambda input, min=None, max=None, out=None: -1,
519 torch.clamp_min: lambda input, min, out=None: -1,
520 torch.clamp_max: lambda input, max, out=None: -1,
521 torch.column_stack: lambda tensors, out=None: -1,
522 torch.cov: lambda input, correction=1, fweights=None, aweights=None: -1,
523 torch.clone: lambda input: -1,
524 torch.combinations: lambda input, r=2, with_replacement=False: -1,
525 torch.complex: lambda real, imag: -1,
526 torch.copysign: lambda input, other, out=None: -1,
527 torch.polar: lambda abs, ang: -1,
528 torch.linalg.cond: lambda input, ord=None: -1,
529 torch.conj: lambda input, out=None: -1,
530 torch.conj_physical: lambda input, out=None: -1,
531 torch.resolve_conj: lambda input, out=None: -1,
532 torch.resolve_neg: lambda input, out=None: -1,
533 torch.constant_pad_nd: lambda input, pad, value=0: -1,
534torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
535torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
536torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
537torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_addin…
538 torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
539torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, gr…
540torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, gr…
541torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, gr…
542 torch.corrcoef: lambda input: -1,
543 torch.cos: lambda input, out=None: -1,
544torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=No…
545 torch.cosh: lambda input, out=None: -1,
546 torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
547 torch.count_nonzero: lambda input: -1,
548 torch.cross: lambda input, other, dim=None, out=None: -1,
549 torch.linalg.cross: lambda input, other, dim=-1, out=None: -1,
550 torch.ctc_loss: (
553 torch.cummax: lambda input, dim, out=None: -1,
554 torch.cummin: lambda input, dim, out=None: -1,
555 torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
556 torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
557 torch.cumulative_trapezoid: lambda y, x=None, dim=-1: -1,
558 torch.logcumsumexp: lambda input, dim, out=None: -1,
559 torch.deg2rad: lambda input, out=None: -1,
560 torch.dequantize: lambda input: -1,
561 torch.det: lambda input: -1,
562 torch.linalg.det: lambda input: -1, # alias for torch.det # type: ignore[attr-defined]
563 torch.detach: lambda input: -1,
564 torch.diag: lambda input, diagonal=0, out=None: -1,
565 torch.diag_embed: lambda input, diagonal=0, out=None: -1,
566 torch.diagflat: lambda input, offset=0: -1,
567 torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
568 torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
569 torch.linalg.diagonal: lambda input, offset=0, dim1=-2, dim2=-1: -1,
570 torch.diagonal_scatter: lambda input, src, offset=0, dim1=0, dim2=1: -1,
571 torch.as_strided_scatter: lambda self, src, size, stride, storage_offset=None: -1,
572 torch.digamma: lambda input, out=None: -1,
573 torch.dist: lambda input, other, p=2: -1,
574 torch.div: lambda input, other, rounding_mode=None, out=None: -1,
575 torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
576 torch.dot: lambda input, other, out=None: -1,
577 torch.dropout: lambda input, p, train, inplace=False: -1,
578 torch.dsmm: lambda input, mat2: -1,
579 torch.hsmm: lambda mat1, mat2: -1,
580 torch.dsplit: lambda input, indices_or_sections: -1,
581 torch.dstack: lambda tensors, out=None: -1,
582 torch.linalg.eig: lambda input, out=None: -1,
583 torch.linalg.eigvals: lambda input, out=None: -1,
584 torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
585 torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
586 torch.einsum: lambda equation, *operands: -1,
587 torch.embedding: (
590 torch.embedding_bag: (
593torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
594 torch.eq: lambda input, other, out=None: -1,
595 torch.equal: lambda input, other: -1,
596 torch.erf: lambda input, out=None: -1,
597 torch.erfc: lambda input, out=None: -1,
598 torch.erfinv: lambda input, out=None: -1,
599 torch.exp: lambda input, out=None: -1,
600 torch.exp2: lambda input, out=None: -1,
601 torch.expm1: lambda input, out=None: -1,
602torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_ma…
603torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
604 torch.fused_moving_avg_obs_fake_quant: (
607 torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
608 torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
609torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_z…
610 torch.fbgemm_linear_int8_weight_fp32_activation: (
613 torch.fbgemm_linear_quantize_weight: lambda input: -1,
614 torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
615 torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
616 torch.feature_alpha_dropout: lambda input, p, train: -1,
617 torch.feature_dropout: lambda input, p, train: -1,
618 torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
619 torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
620 torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
621 torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
622 torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
623 torch.fft.hfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
624 torch.fft.ihfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
625 torch.fft.hfftn: lambda input, s=None, dim=-1, norm=None: -1,
626 torch.fft.ihfftn: lambda input, s=None, dim=-1, norm=None: -1,
627 torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
628 torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
629 torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
630 torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
631 torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
632 torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
633 torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
634 torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
635 torch.fft.fftshift: lambda input, dim=None: -1,
636 torch.fft.ifftshift: lambda input, dim=None: -1,
637 torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
638 torch.fix: lambda input, out=None: -1,
639 torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
640 torch.flip: lambda input, dims: -1,
641 torch.fliplr: lambda input: -1,
642 torch.flipud: lambda input: -1,
643 torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
644 torch.floor: lambda input, out=None: -1,
645 torch.floor_divide: lambda input, other: -1,
646 torch.float_power: lambda input, exponent, out=None: -1,
647 torch.fmod: lambda input, other, out=None: -1,
648 torch.frac: lambda input, out=None: -1,
649 torch.frexp: lambda input, out=None: -1,
650torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None…
651 torch._functional_assert_async: lambda input, msg, dep_token: -1,
652 torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
653 torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
654 torch.gcd: lambda input, other, out=None: -1,
655 torch.ge: lambda input, other, out=None: -1,
656 torch.greater_equal: lambda input, other, out=None: -1,
657 torch.geqrf: lambda input, out=None: -1,
658 torch.i0: lambda input, out=None: -1,
659 torch.inner: lambda input, other, out=None: -1,
660 torch.outer: lambda input, vec2, out=None: -1,
661 torch.ger: lambda input, vec2, out=None: -1, # alias for torch.outer
662 torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1,
663 torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
664torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
665torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
666torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True:…
667torch.gru: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_…
668 torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
669 torch.gt: lambda input, other, out=None: -1,
670 torch.greater: lambda input, other, out=None: -1,
671 torch.hardshrink: lambda input, lambd=0.5: -1,
672 torch.heaviside: lambda input, values, out=None: -1,
673torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, redu…
674 torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
675torch.histogram: lambda input, bins=100, min=None, max=None, weight=None, density=False, out=None:…
676 torch.histogramdd: lambda input, bins, range=None, weight=None, density=False: -1,
677 torch.linalg.householder_product: lambda input, tau: -1,
678 torch.hspmm: lambda mat1, mat2, out=None: -1,
679 torch.hsplit: lambda input, indices_or_sections: -1,
680 torch.hstack: lambda tensors, out=None: -1,
681 torch.hypot: lambda input, other, out=None: -1,
682 torch.igamma: lambda input, other, out=None: -1,
683 torch.igammac: lambda input, other, out=None: -1,
684 torch.imag: lambda input, out=None: -1,
685 torch.index_add: lambda input, dim, index, source: -1,
686 torch.index_copy: lambda input, dim, index, source: -1,
687 torch.index_put: lambda input, indices, values, accumulate=False: -1,
688 torch.index_select: lambda input, dim, index, out=None: -1,
689 torch.index_fill: lambda input, dim, index, value: -1,
690 torch.index_reduce: lambda input, dim, index, source, reduce, include_input=True: -1,
691 torch.isfinite: lambda tensor: -1,
692 torch.isin: lambda e, te, assume_unique=False, invert=False: -1,
693 torch.isinf: lambda tensor: -1,
694 torch.isreal: lambda tensor: -1,
695 torch.isposinf: lambda input, out=None: -1,
696 torch.isneginf: lambda input, out=None: -1,
697 torch.instance_norm: (
700 torch.int_repr: lambda input: -1,
701 torch.inverse: lambda input, out=None: -1,
702 torch.linalg.inv: lambda input, out=None: -1,
703 torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1,
704 torch.is_complex: lambda input: -1,
705 torch.is_conj: lambda input: -1,
706 torch.is_neg: lambda input: -1,
707 torch.is_distributed: lambda input: -1,
708 torch.is_inference: lambda input: -1,
709 torch.is_floating_point: lambda input: -1,
710 torch.is_nonzero: lambda input: -1,
711 torch.is_same_size: lambda input, other: -1,
712 torch.is_signed: lambda input: -1,
713 torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
714 torch.isnan: lambda input: -1,
715 torch.istft: (
718torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction="mean", log_target=F…
719 torch.kron: lambda input, other: -1,
720 torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
721 torch.linalg.ldl_factor_ex: lambda input, hermitian=False, check_errors=False, out=None: -1,
722 torch.linalg.ldl_factor: lambda input, hermitian=False, out=None: -1,
723 torch.linalg.ldl_solve: lambda LD, pivots, B, hermitian=False, out=None: -1,
724torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05, cudnn_enabled…
725 torch.lcm: lambda input, other, out=None: -1,
726 torch.ldexp: lambda input, other, out=None: -1,
727 torch.le: lambda input, other, out=None: -1,
728 torch.less_equal: lambda input, other, out=None: -1,
729 torch.lerp: lambda input, end, weight, out=None: -1,
730 torch.lgamma: lambda input, out=None: -1,
731torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest…
732 torch.log: lambda input, out=None: -1,
733 torch.log_softmax: lambda input, dim, dtype=None: -1,
734 torch.log10: lambda input, out=None: -1,
735 torch.log1p: lambda input, out=None: -1,
736 torch.log2: lambda input, out=None: -1,
737 torch.logaddexp: lambda input, other, out=None: -1,
738 torch.logaddexp2: lambda input, other, out=None: -1,
739 torch.logdet: lambda input: -1,
740 torch.xlogy: lambda x, y, out=None: -1,
741 torch.logical_and: lambda input, other, out=None: -1,
742 torch.logical_not: lambda input, out=None: -1,
743 torch.logical_or: lambda input, other, out=None: -1,
744 torch.logical_xor: lambda input, other, out=None: -1,
745 torch.logit: lambda input, eps=None: -1,
746 torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
747torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirect…
748 torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
749 torch.lt: lambda input, other, out=None: -1,
750 torch.less: lambda input, other, out=None: -1,
751 torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
752 torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
753torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None…
754 torch.masked_fill: lambda input, mask, value: -1,
755 torch.masked_scatter: lambda input, mask, source: -1,
756 torch.masked_select: lambda input, mask, out=None: -1,
757 torch.matmul: lambda input, other, out=None: -1,
758 torch.linalg.lu: lambda input, pivot=True, out=None: -1,
759 torch.linalg.lu_factor: lambda input, pivot=True, out=None: -1,
760 torch.linalg.lu_factor_ex: lambda input, pivot=True, check_errors=False, out=None: -1,
761 torch.linalg.lu_solve: lambda LU, pivots, B, left=True, adjoint=False, out=None: -1,
762 torch.linalg.matmul: lambda input, other, out=None: -1, # alias for torch.matmul
763 torch.matrix_power: lambda input, n: -1,
764 torch.linalg.matrix_power: lambda input, n, out=None: -1,
765 torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
766 torch.linalg.multi_dot: lambda tensors, out=None: -1,
767 torch.matrix_exp: lambda input: -1,
768 torch.linalg.matrix_exp: lambda input: -1,
769 torch.max: lambda input, out=None: -1,
770 torch.maximum: lambda input, other, out=None: -1,
771 torch.fmax: lambda input, other, out=None: -1,
772torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: …
773torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: …
774torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: …
775 torch.max_pool1d_with_indices: (
778 torch.mean: lambda input, dim=None: -1,
779 torch.nanmean: lambda input, dim=None, keepdim=False, dtype=None, out=None: -1,
780 torch.median: lambda input, dim=None: -1,
781 torch.nanmedian: lambda input, dim=None: -1,
782 torch.meshgrid: lambda *tensors, **kwargs: -1,
783 torch.min: lambda input, out=None: -1,
784 torch.minimum: lambda input, other, out=None: -1,
785 torch.fmin: lambda input, other, out=None: -1,
786 torch.miopen_batch_norm: (
789torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark…
790torch.miopen_convolution_add_relu: lambda input, weight, z, alpha, bias, stride, padding, dilation…
791torch.miopen_convolution_relu: lambda input, weight, bias, stride, padding, dilation, groups: -1,
792 torch.miopen_convolution_transpose: (
795 torch.miopen_depthwise_convolution: (
798 torch.miopen_rnn: (
801 torch.mm: lambda input, mat2, out=None: -1,
802 torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
803 torch.movedim: lambda input, source, destination: -1,
804 torch.moveaxis: lambda input, source, destination: -1,
805 torch.msort: lambda input, descending=False, out=None: -1,
806 torch.mul: lambda input, other, out=None: -1,
807 torch.multiply: lambda input, other, out=None: -1,
808 torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
809 torch.mv: lambda input, vec, out=None: -1,
810 torch.mvlgamma: lambda input, p: -1,
811 torch.narrow: lambda input, dim, start, length: -1,
812 torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
813torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum…
814 torch._native_batch_norm_legit: lambda input, weight, bias, training, momentum, eps: -1,
815 torch.native_dropout: lambda input, p, train: -1,
816torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
817 torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
818 torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
819 torch.native_channel_shuffle: lambda input, groups: -1,
820 torch.ne: lambda input, other, out=None: -1,
821 torch.not_equal: lambda input, other, out=None: -1,
822 torch.neg: lambda input, out=None: -1,
823 torch.negative: lambda input, out=None: -1,
824 torch.nextafter: lambda input, other, out=None: -1,
825 torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
826 torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
827torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
828torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=Fa…
829torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
830torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=Fa…
831torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
832torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=Fa…
833 torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
834 torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
835 torch.nn.functional.avg_pool2d: (
838 torch.nn.functional.avg_pool3d: (
841 torch.nn.functional.batch_norm: (
844 torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
845 torch.nn.functional.binary_cross_entropy: (
848 torch.nn.functional.binary_cross_entropy_with_logits: (
851 torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
852 torch.nn.functional.cosine_embedding_loss: (
855 torch.nn.functional.cross_entropy: (
858 torch.nn.functional.ctc_loss: (
861 torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
862 torch.nn.functional.dropout1d: lambda input, p=0.5, training=True, inplace=False: -1,
863 torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
864 torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
865 torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
866 torch.nn.functional.embedding: (
869 torch.nn.functional.embedding_bag: (
872torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
873torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1:…
874 torch.nn.functional.fractional_max_pool2d: (
877 torch.nn.functional.fractional_max_pool2d_with_indices: (
880 torch.nn.functional.fractional_max_pool3d: (
883 torch.nn.functional.fractional_max_pool3d_with_indices: (
886torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction…
887 torch.nn.functional.gelu: lambda input, approximate="none": -1,
888 torch.nn.functional.glu: lambda input, dim=-1: -1,
889torch.nn.functional.grid_sample: lambda input, grid, mode="bilinear", padding_mode="zeros", align_…
890torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
891 torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
892 torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
893 torch.nn.functional.hardtanh: lambda input, min_val=-1.0, max_val=1.0, inplace=False: -1,
894 torch.nn.functional.hinge_embedding_loss: (
897 torch.nn.functional.instance_norm: (
900 torch.nn.functional.interpolate: (
903torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction="mean"…
904torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction="mean…
905torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05:…
906 torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
907 torch.nn.functional.linear: lambda input, weight, bias=None: -1,
908torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
909 torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
910 torch.nn.functional.logsigmoid: lambda input: -1,
911torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False:…
912torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False:…
913torch.nn.functional.lp_pool3d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False:…
914 torch.nn.functional.margin_ranking_loss: (
917 torch.nn.functional.max_pool1d: (
920 torch.nn.functional.max_pool1d_with_indices: (
923 torch.nn.functional.max_pool2d: (
926 torch.nn.functional.max_pool2d_with_indices: (
929 torch.nn.functional.max_pool3d: (
932 torch.nn.functional.max_pool3d_with_indices: (
935torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, outp…
936torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, outp…
937torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, outp…
938torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction="mea…
939 torch.nn.functional.multi_head_attention_forward: (
942 torch.nn.functional.multi_margin_loss: (
945 torch.nn.functional.multilabel_margin_loss: (
948 torch.nn.functional.multilabel_soft_margin_loss: (
951 torch.nn.functional.nll_loss: (
954 torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
955 torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
956 torch.nn.functional.pad: lambda input, pad, mode="constant", value=0: -1,
957 torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
958 torch.nn.functional.poisson_nll_loss: (
961 torch.nn.functional.prelu: lambda input, weight: -1,
962 torch.nn.functional.relu: lambda input, inplace=False: -1,
963 torch.nn.functional.relu6: lambda input, inplace=False: -1,
964 torch.nn.functional.rms_norm: lambda input, normalized_shape, weight=None, eps=1e-6: -1,
965torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, in…
966 torch.nn.functional.selu: lambda input, inplace=False: -1,
967 torch.nn.functional.silu: lambda input, inplace=False: -1,
968 torch.nn.functional.mish: lambda input, inplace=False: -1,
969torch.nn.functional.scaled_dot_product_attention: lambda query, key, value, attn_mask=None, dropou…
970torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reductio…
971 torch.nn.functional.huber_loss: lambda input, target, reduction="mean", delta=1.0: -1,
972torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduct…
973 torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
974 torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
975 torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
976 torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
977 torch.nn.functional.softsign: lambda input: -1,
978 torch.nn.functional.tanhshrink: lambda input: -1,
979 torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
980 torch.nn.functional.triplet_margin_loss: (
983 torch.nn.functional.triplet_margin_with_distance_loss: (
986 torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
987 torch.nn.init.uniform_: lambda tensor, a=0.0, b=1.0, generator=None: -1,
988 torch.nn.init.normal_: lambda tensor, mean=0.0, std=1.0, generator=None: -1,
989 torch.nn.init.constant_: lambda tensor, val: -1,
990torch.nn.init.kaiming_uniform_: lambda tensor, a=0, mode="fan_in", nonlinearity="leaky_relu", gene…
991 torch.nonzero: lambda input, as_tuple=False: -1,
992 torch.nonzero_static: lambda input, *, size, fill_value=-1: -1,
993 torch.argwhere: lambda input: -1,
994 torch.norm: lambda input, p="fro", dim=None, keepdim=False, out=None, dtype=None: -1,
995torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
996torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
997 torch.linalg.matrix_norm: lambda input, ord="fro", dim=(
1001 torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
1002torch.nuclear_norm: lambda input, p="fro", dim=None, keepdim=False, out=None, dtype=None: -1,
1003 torch.numel: lambda input: -1,
1004 torch.orgqr: lambda input, tau: -1,
1005 torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
1006 torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
1007 torch.permute: lambda self, dim: -1,
1008 torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
1009 torch.pdist: lambda input, p=2: -1,
1010 torch.pinverse: lambda input, rcond=1e-15: -1,
1011 torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
1012 torch.pixel_shuffle: lambda input, upscale_factor: -1,
1013 torch.pixel_unshuffle: lambda input, downscale_factor: -1,
1014 torch.poisson: lambda input, generator=None: -1,
1015 torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
1016 torch.polygamma: lambda input, n, out=None: -1,
1017 torch.positive: lambda input, out=None: -1,
1018 torch.prelu: lambda input, weight: -1,
1019torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
1020 torch.pow: lambda input, exponent, out=None: -1,
1021 torch.prod: lambda input, dtype=None: -1,
1022 torch.put: lambda input, index, source, accumulate=False: -1,
1023 torch.q_per_channel_axis: lambda input: -1,
1024 torch.q_per_channel_scales: lambda input: -1,
1025 torch.q_per_channel_zero_points: lambda input: -1,
1026 torch.q_scale: lambda input: -1,
1027 torch.q_zero_point: lambda input: -1,
1028 torch.qr: lambda input, some=True, out=None: -1,
1029 torch.linalg.qr: lambda input, mode="reduced", out=None: -1,
1030torch.quantile: lambda input, q, dim=None, keepdim=False, interpolation="linear", out=None: -1,
1031torch.nanquantile: lambda input, q, dim=None, keepdim=False, interpolation="linear", out=None: -1,
1032 torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
1033 torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
1034 torch.quantize_per_tensor_dynamic: lambda input, dtype, reduce_range: -1,
1035torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_…
1036 torch.quantized_gru_cell: (
1039 torch.quantized_lstm_cell: (
1042 torch.quantized_max_pool1d: (
1047 torch.quantized_max_pool2d: (
1053 torch.quantized_max_pool3d: (
1060 torch.quantized_rnn_relu_cell: (
1063 torch.quantized_rnn_tanh_cell: (
1066 torch.rad2deg: lambda input, out=None: -1,
1067torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
1068torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_gr…
1069torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
1070 torch.ravel: lambda input: -1,
1071 torch.real: lambda input, out=None: -1,
1072 torch.vdot: lambda input, other, out=None: -1,
1073 torch.linalg.vecdot: lambda input, other, dim=-1, out=None: -1,
1074 torch.view_as_real: lambda input: -1,
1075 torch.view_as_complex: lambda input: -1,
1076 torch.reciprocal: lambda input, out=None: -1,
1077 torch.relu: lambda input, inplace=False: -1,
1078 torch.remainder: lambda input, other, out=None: -1,
1079 torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
1080 torch.repeat_interleave: lambda input, dim=None: -1,
1081 torch.reshape: lambda input, shape: -1,
1082 torch.rms_norm: lambda input, normalized_shape, weight=None, eps=1e-6: -1,
1083torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, b…
1084 torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
1085torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, b…
1086 torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
1087 torch.roll: lambda input, shifts, dims=None: -1,
1088 torch.rot90: lambda input, k=1, dims=(0, 1): -1,
1089 torch.round: lambda input, out=None: -1,
1090 torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack
1091 torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
1092 torch.rrelu: lambda input, lower=1.0 / 8, upper=1.0 / 3, training=False, inplace=False: -1,
1093 torch.rsqrt: lambda input, out=None: -1,
1094 torch.rsub: lambda input, other, alpha=1: -1,
1095 torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
1096 torch.scatter: lambda input, dim, index, src: -1,
1097 torch.scatter_add: lambda input, dim, index, src: -1,
1098 torch.scatter_reduce: lambda input, dim, index, src, reduce, include_self=True: -1,
1099torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
1100torch._segment_reduce: lambda data, reduce="max", lengths=None, indices=None, offsets=None, axis=0…
1101 torch.select: lambda input, dim, index: -1,
1102 torch.select_scatter: lambda input, src, dim, index: -1,
1103 torch.slice_inverse: lambda input, src, dim=0, start=None, end=None, step=1: -1,
1104 torch.slice_scatter: lambda input, src, dim=0, start=None, end=None, step=1: -1,
1105 torch.selu: lambda input, inplace=False: -1,
1106 torch.sigmoid: lambda input, out=None: -1,
1107 torch.sign: lambda input, out=None: -1,
1108 torch.signbit: lambda input, out=None: -1,
1109 torch.sgn: lambda input, out=None: -1,
1110 torch.sin: lambda input, out=None: -1,
1111 torch.sinc: lambda input, out=None: -1,
1112 torch.sinh: lambda input, out=None: -1,
1113 torch.slogdet: lambda input: -1,
1114 torch.linalg.slogdet: lambda input: -1,
1115 torch.smm: lambda input, mat2: -1,
1116 torch.spmm: lambda input, mat2: -1,
1117 torch.softmax: lambda input, dim, dtype=None: -1,
1118 torch.linalg.solve: lambda A, B, left=True, out=None: -1,
1119 torch.linalg.solve_ex: lambda A, B, left=True, check_errors=False, out=None: -1,
1120 torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
1121 torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
1122 torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
1123 torch.sqrt: lambda input, out=None: -1,
1124 torch.square: lambda input, out=None: -1,
1125 torch.squeeze: lambda input, dim=None, out=None: -1,
1126 torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
1127 torch.stack: lambda tensors, dim=0, out=None: -1,
1128 torch.std: lambda input, dim=None: -1,
1129 torch.std_mean: lambda input, dim=None: -1,
1130 torch.stft: (
1133 torch.sub: lambda input, other, out=None: -1,
1134 torch.subtract: lambda input, other, out=None: -1,
1135 torch.sum: lambda input, dim=None: -1,
1136 torch.sym_float: lambda input: -1,
1137 torch.sym_int: lambda input: -1,
1138 torch.sym_max: lambda a, b: -1,
1139 torch.sym_min: lambda a, b: -1,
1140 torch.sym_not: lambda input: -1,
1141 torch.sym_ite: lambda a, b, c: -1,
1142 torch._sym_sqrt: lambda input: -1,
1143 torch._sym_cos: lambda input: -1,
1144 torch._sym_cosh: lambda input: -1,
1145 torch._sym_sin: lambda input: -1,
1146 torch._sym_sinh: lambda input: -1,
1147 torch._sym_tan: lambda input: -1,
1148 torch._sym_tanh: lambda input: -1,
1149 torch._sym_asin: lambda input: -1,
1150 torch._sym_acos: lambda input: -1,
1151 torch._sym_atan: lambda input: -1,
1152 torch.nansum: lambda input, dim=None: -1,
1153 torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
1154 torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
1155 torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
1156 torch.linalg.svdvals: lambda input, out=None: -1,
1157 torch.swapaxes: lambda input, dim0, dim1: -1,
1158 torch.swapdims: lambda input, axis0, axis1: -1,
1159 torch.special.airy_ai: lambda input: -1,
1160 torch.special.bessel_j0: lambda input: -1,
1161 torch.special.bessel_j1: lambda input: -1,
1162 torch.special.bessel_y0: lambda input: -1,
1163 torch.special.bessel_y1: lambda input: -1,
1164 torch.special.chebyshev_polynomial_t: lambda input, n, out=None: -1,
1165 torch.special.chebyshev_polynomial_u: lambda input, n, out=None: -1,
1166 torch.special.chebyshev_polynomial_v: lambda input, n, out=None: -1,
1167 torch.special.chebyshev_polynomial_w: lambda input, n, out=None: -1,
1168 torch.special.digamma: lambda input: -1,
1169 torch.special.entr: lambda input: -1,
1170 torch.special.erf: lambda input: -1,
1171 torch.special.erfc: lambda input: -1,
1172 torch.special.erfcx: lambda input: -1,
1173 torch.special.erfinv: lambda input: -1,
1174 torch.special.exp2: lambda input: -1,
1175 torch.special.expit: lambda input: -1,
1176 torch.special.expm1: lambda input: -1,
1177 torch.special.gammainc: lambda input, other, out=None: -1,
1178 torch.special.gammaincc: lambda input, other, out=None: -1,
1179 torch.special.gammaln: lambda input: -1,
1180 torch.special.hermite_polynomial_h: lambda input, n, out=None: -1,
1181 torch.special.hermite_polynomial_he: lambda input, n, out=None: -1,
1182 torch.special.i0: lambda input: -1,
1183 torch.special.i0e: lambda input: -1,
1184 torch.special.i1: lambda input: -1,
1185 torch.special.i1e: lambda input: -1,
1186 torch.special.laguerre_polynomial_l: lambda input, n, out=None: -1,
1187 torch.special.legendre_polynomial_p: lambda input, n, out=None: -1,
1188 torch.special.log1p: lambda input: -1,
1189 torch.special.log_ndtr: lambda input: -1,
1190 torch.special.log_softmax: lambda input, dim, dtype=None: -1,
1191 torch.special.logit: lambda input: -1,
1192 torch.special.logsumexp: lambda input, dim, keepdim=False, out=None: -1,
1193 torch.special.modified_bessel_i0: lambda input: -1,
1194 torch.special.modified_bessel_i1: lambda input: -1,
1195 torch.special.modified_bessel_k0: lambda input: -1,
1196 torch.special.modified_bessel_k1: lambda input: -1,
1197 torch.special.multigammaln: lambda input, p: -1,
1198 torch.special.ndtr: lambda input: -1,
1199 torch.special.ndtri: lambda input: -1,
1200 torch.special.polygamma: lambda input, n, out=None: -1,
1201 torch.special.psi: lambda input: -1,
1202 torch.special.round: lambda input: -1,
1203 torch.special.scaled_modified_bessel_k0: lambda input: -1,
1204 torch.special.scaled_modified_bessel_k1: lambda input: -1,
1205 torch.special.shifted_chebyshev_polynomial_t: lambda input, n, out=None: -1,
1206 torch.special.shifted_chebyshev_polynomial_u: lambda input, n, out=None: -1,
1207 torch.special.shifted_chebyshev_polynomial_v: lambda input, n, out=None: -1,
1208 torch.special.shifted_chebyshev_polynomial_w: lambda input, n, out=None: -1,
1209 torch.special.sinc: lambda input: -1,
1210 torch.special.softmax: lambda input, dim, dtype=None: -1,
1211 torch.special.spherical_bessel_j0: lambda input: -1,
1212 torch.special.xlog1py: lambda input, other, out=None: -1,
1213 torch.special.xlogy: lambda input, other, out=None: -1,
1214 torch.special.zeta: lambda self, other, out=None: -1,
1215 torch.t: lambda input: -1,
1216 torch.take: lambda input, index: -1,
1217 torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
1218 torch.tan: lambda input, out=None: -1,
1219 torch.tanh: lambda input, out=None: -1,
1220 torch.linalg.tensorinv: lambda a, ind=2: -1,
1221 torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
1222 torch.tensordot: lambda a, b, dims=2, out=None: -1,
1223 torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
1224 torch.threshold: lambda input, threshold, value, inplace=False: -1,
1225 torch.tile: lambda input, dims: -1,
1226 torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
1227 torch.trace: lambda input: -1,
1228 torch.transpose: lambda input, dim0, dim1: -1,
1229 torch.trapz: lambda y, x=None, dim=-1: -1,
1230 torch.trapezoid: lambda y, x=None, dim=-1: -1,
1231torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
1232 torch.linalg.solve_triangular: lambda input, B, upper, left=True, unitriangular=False: -1,
1233 torch.tril: lambda input, diagonal=0, out=None: -1,
1234 torch.triplet_margin_loss: (
1237 torch.triu: lambda input, diagonal=0, out=None: -1,
1238 torch.true_divide: lambda input, other: -1,
1239 torch.trunc: lambda input, out=None: -1,
1240 torch.unbind: lambda input, dim=0: -1,
1241 torch.unflatten: lambda input, dim, sizes, names: -1,
1242torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
1243torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
1244 torch.unravel_index: lambda indices, shape: -1,
1245 torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
1246 torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
1247 torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
1248 torch.unsqueeze: lambda input, dim, out=None: -1,
1249 torch.linalg.vander: lambda x, N=None: -1,
1250 torch.var: lambda input, dim=None: -1,
1251 torch.var_mean: lambda input, dim=None: -1,
1252 torch.vsplit: lambda input, indices_or_sections: -1,
1253 torch.vstack: lambda tensors, out=None: -1,
1254 torch.where: lambda condition, x=None, y=None: -1,
1255 torch._wrapped_linear_prepack: lambda weight, weight_scale, weight_zero_point, bias : -1,
1256 torch._wrapped_quantized_linear_prepacked: (
1259torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
1260 torch._fw_primal_copy: lambda self, level: -1,
1261 torch._make_dual_copy: lambda primal, tangent, level: -1,
1262 torch.view_as_real_copy: lambda self: -1,
1263 torch.view_as_complex_copy: lambda self: -1,
1264 torch._conj_copy: lambda self: -1,
1265 torch._neg_view_copy: lambda self: -1,
1266 torch.as_strided_copy: lambda self, size, stride, storage_offset=None: -1,
1267 torch._sparse_broadcast_to_copy: lambda self, size: -1,
1268 torch.diagonal_copy: lambda self, offset=0, dim1=0, dim2=1: -1,
1269 torch.expand_copy: lambda self, size, *, implicit=False: -1,
1270 torch.narrow_copy: lambda self, dim, start, length: -1,
1271 torch.permute_copy: lambda self, dims: -1,
1272 torch._reshape_alias_copy: lambda self, size, stride: -1,
1273 torch.select_copy: lambda self, dim, index: -1,
1274 torch.detach_copy: lambda self: -1,
1275 torch.slice_copy: lambda self, dim=0, start=None, end=None, step=1: -1,
1276 torch.split_copy: lambda self, split_size, dim=0: -1,
1277 torch.split_with_sizes_copy: lambda self, split_sizes, dim=0: -1,
1278 torch.squeeze_copy: lambda self, dim: -1,
1279 torch.t_copy: lambda self: -1,
1280 torch.transpose_copy: lambda self, dim0, dim1: -1,
1281 torch.unsqueeze_copy: lambda self, dim: -1,
1282 torch._indices_copy: lambda self: -1,
1283 torch._values_copy: lambda self: -1,
1284 torch.indices_copy: lambda self: -1,
1285 torch.values_copy: lambda self: -1,
1286 torch.crow_indices_copy: lambda self: -1,
1287 torch.col_indices_copy: lambda self: -1,
1288 torch.ccol_indices_copy: lambda self: -1,
1289 torch.row_indices_copy: lambda self: -1,
1290 torch.unbind_copy: lambda self, dim=0: -1,
1291 torch.view_copy: lambda self, dtype: -1,
1292 torch.unfold_copy: lambda self, dimension, size, step: -1,
1293 torch.alias_copy: lambda self: -1,
1399 Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
1400 Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
1401 Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
1402 Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
1406 Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
1408 Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
1409 Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
1410 Tensor.mtia: lambda self, memory_format=torch.preserve_format: -1,
1411 Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
1412 Tensor.ipu: lambda self, memory_format=torch.preserve_format: -1,
1418 Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
1419 Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1,
1426 Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
1427 Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1,
1430 Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
1431 Tensor.chalf: lambda self, memory_format=torch.preserve_format: -1,
1434 Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
1444 Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
1478 Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
1493 …Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format…
1508 torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1,
1512 torch.utils.backend_registration._privateuse1_backend_name
1569 >>> @torch.overrides.wrap_torch_function(dispatcher)
1604 See torch::append_overloaded_arg for the equivalent function in the C++
1628 # If torch function is not enabled, there are no overloaded types
1629 if not torch._C._is_torch_function_enabled():
1645 and arg_type.__torch_function__ != torch._C._disabled_torch_function_impl
1674 See torch::autograd::handle_torch_function for the equivalent of this
1680 Function exposed by the public torch API originally called like
1715 # this unsets it and calls directly into TorchFunctionStackMode's torch function
1729 and torch_func_method is not torch._C._disabled_torch_function_impl
1772 torch.is_tensor_like
1810 ("torch", torch, torch.__all__),
1811 ("torch.functional", torch.functional, torch.functional.__all__),
1812 ("torch.nn.functional", torch.nn.functional, dir(torch.nn.functional)),
1813 ("torch.nn.init", torch.nn.init, dir(torch.nn.init)),
1814 ("torch.Tensor", torch.Tensor, dir(torch.Tensor)),
1815 ("torch.linalg", torch.linalg, dir(torch.linalg)),
1816 ("torch.fft", torch.fft, dir(torch.fft)),
1817 ("torch.special", torch.special, dir(torch.special)),
1822 # ignore private functions or functions that are deleted in torch.__init__
1823 if namespace is not torch.Tensor:
1841 if namespace is torch.Tensor and getattr(object, func_name, None) == func:
1857 "{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
1879 "{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
1919 if isinstance(f, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
1926 """Returns a set of the overridable methods on ``torch.Tensor``"""
1928 methods = set(overridable_funcs[torch.Tensor])
1936 method or property belonging to ``torch.Tensor``, as passed
1946 of ``torch.Tensor``.
1950 >>> is_tensor_method_or_property(torch.Tensor.add)
1952 >>> is_tensor_method_or_property(torch.add)
1969 >>> class SubTensor(torch.Tensor): ...
1992 return type(inp) is torch.Tensor or hasattr(inp, "__torch_function__")
2089 # `enable_reentrant_dispatch = torch._C._RestorePythonTLSSnapshot`
2091 # 1. torch._C._RestorePythonTLSSnapshot is unavailable when this file
2095 with torch._C._RestorePythonTLSSnapshot():