• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_UTILS_CPU_AUX_TENSOR_HANDLER_H
#define ARM_COMPUTE_CPU_UTILS_CPU_AUX_TENSOR_HANDLER_H

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Tensor.h"

#include "src/common/utils/Log.h"
#include "support/Cast.h"

namespace arm_compute
{
namespace cpu
{
38 /* Tensor handler to wrap and handle tensor allocations on workspace buffers */
39 class CpuAuxTensorHandler
40 {
41 public:
42     CpuAuxTensorHandler(int slot_id, TensorInfo &info, ITensorPack &pack, bool pack_inject = false, bool bypass_alloc = false)
_tensor()43         : _tensor()
44     {
45         if(info.total_size() == 0)
46         {
47             return;
48         }
49         _tensor.allocator()->soft_init(info);
50 
51         ITensor *packed_tensor = utils::cast::polymorphic_downcast<ITensor *>(pack.get_tensor(slot_id));
52         if((packed_tensor == nullptr) || (info.total_size() > packed_tensor->info()->total_size()))
53         {
54             if(!bypass_alloc)
55             {
56                 _tensor.allocator()->allocate();
57                 ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL("Allocating auxiliary tensor");
58             }
59 
60             if(pack_inject)
61             {
62                 pack.add_tensor(slot_id, &_tensor);
63                 _injected_tensor_pack = &pack;
64                 _injected_slot_id     = slot_id;
65             }
66         }
67         else
68         {
69             _tensor.allocator()->import_memory(packed_tensor->buffer());
70         }
71     }
72 
CpuAuxTensorHandler(TensorInfo & info,ITensor & tensor)73     CpuAuxTensorHandler(TensorInfo &info, ITensor &tensor)
74         : _tensor()
75     {
76         _tensor.allocator()->soft_init(info);
77         if(info.total_size() <= tensor.info()->total_size())
78         {
79             _tensor.allocator()->import_memory(tensor.buffer());
80         }
81     }
82 
83     CpuAuxTensorHandler(const CpuAuxTensorHandler &) = delete;
84     CpuAuxTensorHandler &operator=(const CpuAuxTensorHandler) = delete;
85 
~CpuAuxTensorHandler()86     ~CpuAuxTensorHandler()
87     {
88         if(_injected_tensor_pack)
89         {
90             _injected_tensor_pack->remove_tensor(_injected_slot_id);
91         }
92     }
93 
get()94     ITensor *get()
95     {
96         return &_tensor;
97     }
98 
operator()99     ITensor *operator()()
100     {
101         return &_tensor;
102     }
103 
104 private:
105     Tensor       _tensor{};
106     ITensorPack *_injected_tensor_pack{ nullptr };
107     int          _injected_slot_id{ TensorType::ACL_UNKNOWN };
108 };
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_UTILS_CPU_AUX_TENSOR_HANDLER_H */