/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;
using ScalarType = exec_aten::ScalarType;

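/**
 * as_strided_copy.out: copies into `out` the elements of `in` that a view
 * with the given `size`, `stride`, and optional `storage_offset` over `in`'s
 * data would select. `out` is resized to `size`; returns `out`.
 */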
Tensor& as_strided_copy_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    ArrayRef<int64_t> size,
    ArrayRef<int64_t> stride,
    optional<int64_t> storage_offset,
    Tensor& out) {
  (void)ctx;

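  // Validate that size, stride, storage_offset, and out are consistent with
  // in before resizing out or touching any data.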
  ET_KERNEL_CHECK(
      ctx,
      check_as_strided_copy_args(in, size, stride, storage_offset, out),
      InvalidArgument,
      out);

  ET_KERNEL_CHECK(
      ctx,
      resize_tensor(out, size) == torch::executor::Error::Ok,
      InvalidArgument,
      out);

  ET_KERNEL_CHECK(
      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);

  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);

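  // A zero-element input has nothing to copy; out has already been resized.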
  if (in.numel() == 0) {
    return out;
  }

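  // Treat a missing storage_offset as an offset of 0 into in's data.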
  size_t offset = storage_offset.has_value() ? storage_offset.value() : 0;

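  // Dispatch on in's dtype and perform the strided copy into out using the
  // requested size, stride, and offset.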
  ET_SWITCH_ALL_TYPES(in.scalar_type(), ctx, __func__, CTYPE, [&] {
    as_strided_copy<CTYPE>(in, size, stride, offset, out);
  });

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch