Searched full:split_with_sizes (Results 1 – 25 of 77) sorted by relevance

/external/pytorch/test/inductor/
test_pattern_matcher.py:837 split_with_sizes = torch.ops.aten.split_with_sizes.default(a, [8, 24], 1)
838 getitem = split_with_sizes[0]
839 getitem_1 = split_with_sizes[1]
850 split_with_sizes = torch.ops.aten.split_with_sizes.default(a, [8, 8, 16], 1)
851 getitem = split_with_sizes[0]
852 getitem_1 = split_with_sizes[1]
853 getitem_2 = split_with_sizes[2]
864 split_with_sizes = torch.ops.aten.split_with_sizes.default(
867 cat = torch.ops.aten.cat.default(split_with_sizes, 0)
877 x = torch.ops.aten.split_with_sizes.default(a, [3, 2, 3], dim=1)
[all …]
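
A rough, hedged sketch of the eager-mode code these inductor pattern-matcher graphs correspond to (names and sizes are illustrative, not taken from the test): a split_with_sizes whose pieces are immediately concatenated back is the kind of redundant split/cat pair the pass removes.

    import torch

    def split_then_cat(a: torch.Tensor) -> torch.Tensor:
        # split into views of width 8 and 24 along dim 1, then stitch them back together
        pieces = a.split_with_sizes([8, 24], dim=1)
        return torch.cat(pieces, dim=1)

    a = torch.randn(4, 32)
    assert torch.equal(split_then_cat(a), a)   # round-trips to the original values
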
/external/pytorch/aten/src/ATen/native/nested/
NestedTensorUtils.cpp:119 TORCH_CHECK_INDEX(false, "split_with_sizes() cannot be applied to a 0-dim tensor."); in split_with_sizes_nested()
123 … "split_with_sizes for nested tensors is currently only supported for the last dimension."); in split_with_sizes_nested()
126 "split_with_sizes expects number of splits to be greater than 0, got: ", num_splits); in split_with_sizes_nested()
127 TORCH_CHECK(self.is_contiguous(), "split_with_sizes expects `self` to be contiguous."); in split_with_sizes_nested()
137 "split_with_sizes expects split_sizes to sum exactly to ", self_size, in split_with_sizes_nested()
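
For reference, a minimal sketch of the sum-to-size check quoted above, shown on an ordinary (non-nested) tensor; the nested-tensor overload additionally restricts the op to the last dimension and to contiguous inputs.

    import torch

    x = torch.arange(10)
    a, b = x.split_with_sizes([4, 6])   # sizes sum to x.size(0) == 10: OK
    # x.split_with_sizes([4, 5])        # raises: split_sizes must sum exactly to 10
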
/external/pytorch/test/jit/
test_autodiff_subgraph_slicing.py:462 # to merge both split_with_sizes in relu in one graph
476 # Case 2: aliasing between relu and split_with_sizes
478 # to merge both split_with_sizes in relu in one graph
479 # i.e. relu and split_with_sizes should be in different
486 %2 : Tensor[] = aten::split_with_sizes(%b, %0, %1)
495 ).check("Tensor = aten::relu").check_not("aten::split_with_sizes").run(
500 # Both `split_with_sizes` should be unfused
507 %2 : Tensor[] = aten::split_with_sizes(%b, %s1, %1)
508 %3 : Tensor[] = aten::split_with_sizes(%b, %s2, %1)
517 ).check("Tensor = aten::relu").check_not("aten::split_with_sizes").run(
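
A minimal eager-mode sketch (not the test graph itself) of the aliasing these comments refer to: every output of split_with_sizes is a view of its input, so an op that mutates the input cannot be merged past the split. The check below relies on Tensor._base, which reports a view's base tensor.

    import torch

    b = torch.randn(4, 4)
    parts = b.split_with_sizes([2, 2], dim=1)
    assert all(p._base is b for p in parts)   # both pieces alias b's storage
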
/external/executorch/backends/qualcomm/builders/
op_split_with_sizes.py:20 target = ["aten.split_with_sizes.default"]
43 # split_with_sizes will return a tuple since it has multiple outputs
/external/pytorch/torch/csrc/jit/passes/onnx/
preprocess_for_onnx.cpp:41 // split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
46 // %8 : Tensor[] = aten::split_with_sizes(%input, %13, %7)
59 // aten::split_with_sizes(%input, %13, %7, %8) return (%14, %15, %16)
96 case aten::split_with_sizes: in FuseWithListUnpack()
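
A hedged sketch of the Python source that produces the IR shown in this pass's comment: tuple-unpacking the outputs of split_with_sizes is what appears as aten::split_with_sizes followed by prim::ListUnpack, which FuseWithListUnpack folds into a single multi-output node for ONNX export.

    import torch

    def f(x: torch.Tensor):
        # in the captured JIT graph this shows up as aten::split_with_sizes + prim::ListUnpack
        a, b, c = x.split_with_sizes([1, 2, 3], dim=0)
        return a, b, c

    f(torch.randn(6, 4))
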
/external/pytorch/torch/csrc/cuda/
comm.cpp:285 tensor.split_with_sizes(/*split_sizes=*/chunk_sizes, /*dim=*/dim); in _broadcast_out_impl()
332 ? tensor.split_with_sizes(/*split_sizes=*/*chunk_sizes, /*dim=*/dim) in _broadcast_out_impl()
384 out_tensor.split_with_sizes(/*split_sizes=*/chunk_sizes, /*dim=*/dim); in _broadcast_out_impl()
/external/executorch/backends/arm/quantizer/quantization_annotation/
generic_annotator.py:49 torch.ops.aten.split_with_sizes.default,
/external/pytorch/torch/_inductor/fx_passes/
post_grad.py:607 split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
625 # For example, if the split_node like split_with_sizes(input, [2, 2, 3], 1),
902 aten.split_with_sizes,
922 split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
957 aten.split_with_sizes,
/external/executorch/backends/arm/test/ops/
test_split.py:38 return x.split_with_sizes(split_sizes=split_sizes, dim=dim)
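
The same call in isolation, assuming concrete sizes (the test parametrizes them): the split sizes must add up to the size of the chosen dimension, and each returned piece keeps every other dimension unchanged.

    import torch

    x = torch.randn(2, 8)
    chunks = x.split_with_sizes(split_sizes=[3, 2, 3], dim=1)
    print([tuple(c.shape) for c in chunks])   # [(2, 3), (2, 2), (2, 3)]
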
/external/pytorch/functorch/op_analysis/
public_api:202 split_with_sizes
annotated_ops:253 split_with_sizes, view/reshape
/external/pytorch/docs/source/
tensor_view.rst:81 - :meth:`~torch.Tensor.split_with_sizes`
/external/pytorch/aten/src/ATen/native/
MathBitFallThroughLists.h:37 m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \
/external/pytorch/torch/csrc/jit/runtime/
register_special_ops.cpp:248 RECORD_FUNCTION("split_with_sizes", last(stack, 3)); in __anon0ef288b80202()
250 auto result = at::split_with_sizes( in __anon0ef288b80202()
/external/pytorch/test/
test_functionalization.py:858 # test: view ops that return multiple tensors (split_with_sizes)
860 y1, y2 = x.split_with_sizes([2, 2])
908 split_with_sizes = torch.ops.aten.split_with_sizes.default(arg0_1, [2, 2])
909 getitem = split_with_sizes[0]
910 getitem_1 = split_with_sizes[1]; split_with_sizes = getitem_1 = None
913 split_with_sizes_1 = torch.ops.aten.split_with_sizes.default(arg0_1, [2, 2])
918 split_with_sizes_2 = torch.ops.aten.split_with_sizes.default(slice_scatter, [2, 2])
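
As a small illustration of why functionalization has to special-case this op: split_with_sizes returns views, so an in-place write to one piece mutates the base tensor, and the functionalized graph above replays that mutation with slice_scatter.

    import torch

    x = torch.zeros(4)
    y1, y2 = x.split_with_sizes([2, 2])
    y1.add_(1)            # writes through the view into x
    print(x)              # tensor([1., 1., 0., 0.])
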
test_proxy_tensor.py:1572 split_with_sizes = torch.ops.aten.split_with_sizes.default(values_1, [_local_scalar_dense, _local_…
1573 getitem = split_with_sizes[0]
1574 getitem_1 = split_with_sizes[1]
1575 getitem_2 = split_with_sizes[2]; split_with_sizes = None
/external/pytorch/torch/csrc/jit/runtime/static/
native_ops.cpp:736 at::native::split_with_sizes(self, split_sizes.vec(), dim); in __anon69571ad14602()
745 aten::split_with_sizes,
749 … "aten::split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]")) && in __anon69571ad14902()
751 … "aten::split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> (Tensor[])"))) { in __anon69571ad14902()
760 at::native::split_with_sizes(self, split_sizes.vec(), dim); in __anon69571ad14902()
/external/pytorch/test/cpp/api/
inference_mode.cpp:482 b = s_view.split_with_sizes({1, 1}); in TEST()
485 c = s.split_with_sizes({1, 1}); in TEST()
/external/executorch/backends/qualcomm/_passes/
layout_transform.py:72 exir_ops.edge.aten.split_with_sizes.default,
/external/executorch/exir/tests/
test_delegate.py:292 …# TODO(ssjia): split.Tensor now gets decomposed to split_with_sizes. Due to how executorch uses a …
/external/pytorch/torch/onnx/
symbolic_opset13.py:118 @_onnx_symbolic("aten::split_with_sizes")
119 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): function
134 return split_with_sizes(g, self, split_sizes, dim, _outputs)
/external/pytorch/aten/src/ATen/native/nested/cuda/
NestedTensorTransformerFunctions.cpp:196 at::split_with_sizes(metadata, {offsets.numel(), nt_sizes.numel()}, 0); in NestedTensor_to_padded_tensor_cuda()
/external/pytorch/test/expect/
HasDecompTest.test_aten_core_operators.expect:483 aten::split_with_sizes
/external/pytorch/aten/src/ATen/native/cuda/
TensorShape.cu:728 "split_with_sizes expects split_sizes have only non-negative ", in split_with_sizes_copy_out_cuda()
735 "split_with_sizes expects split_sizes to sum exactly to ", in split_with_sizes_copy_out_cuda()
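
Both checks quoted from the CUDA copy-out kernel mirror the op's general argument validation; a quick sketch of inputs that trip them, shown commented out so the snippet runs cleanly (and on CPU, since the checks are not CUDA-specific):

    import torch

    x = torch.randn(8)
    # x.split_with_sizes([-1, 9])   # raises: split_sizes entries must be non-negative
    # x.split_with_sizes([3, 3])    # raises: split_sizes must sum exactly to 8
    x.split_with_sizes([3, 5])      # OK
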
/external/pytorch/test/mobile/model_test/
model_ops.yaml:337 aten::split_with_sizes: 1
