/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/portable/cpu/util/padding_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;

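// reflection_pad1d.out: pad the last dimension of `in` by reflecting its
// values across each edge (the edge element itself is not repeated), writing
// the result into `out`. `padding` holds {padding_left, padding_right}.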
Tensor& reflection_pad1d_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    exec_aten::ArrayRef<int64_t> padding,
    Tensor& out) {
  (void)ctx;

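  // Validate the input, padding spec, and output for 1-D reflection padding.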
  ET_KERNEL_CHECK(
      ctx,
      check_padding_args(1, in, padding, out, /*reflection*/ true),
      InvalidArgument,
      out);

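  // The input and output must share the same dim order, and the input must
  // use the default dim order.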
  ET_KERNEL_CHECK(
      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);

  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);

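  // Compute the padded output shape and resize `out` to match it.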
  Tensor::SizesType target_sizes[kTensorDimensionLimit];
  size_t target_ndim = 0;
  get_padding_out_target_size(1, in, padding, target_sizes, &target_ndim);

  ET_KERNEL_CHECK(
      ctx,
      resize_tensor(out, {target_sizes, target_ndim}) == Error::Ok,
      InvalidArgument,
      out);

  ScalarType in_type = in.scalar_type();
  constexpr auto name = "reflection_pad1d.out";

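  // Dispatch on the input dtype and fill `out` via the shared pad1d helper,
  // using reflection index mapping.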
  ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
    pad1d<CTYPE>(reflection_ix, in, out, padding);
  });

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch