• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NNRT_NNRT_ALLOCATOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NNRT_NNRT_ALLOCATOR_H_

#include <atomic>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "include/api/allocator.h"
#include "src/tensor.h"
#include "neural_network_runtime/neural_network_runtime.h"
28 
29 struct OH_NN_Memory;
30 struct OH_NNExecutor;
31 
32 namespace mindspore {
33 namespace lite {
34 
35 class NNRTAllocator : public Allocator {
36  public:
NNRTAllocator()37   NNRTAllocator() {}
38   ~NNRTAllocator() override;
39   static std::shared_ptr<NNRTAllocator> GetInstance();
40   void *Malloc(size_t size) override;
41   void *MallocByDesc(size_t size, const std::vector<int> &shape, const TypeId data_type, const Format format,
42                      const std::string &name);
43   NN_TensorDesc *CreateNNRtTensorDesc(const std::vector<int> &shape, const TypeId data_type, const Format format,
44                                       const std::string &name);
45   OH_NN_ReturnCode SetTensorDesc(NN_TensorDesc *tensor_desc, const std::vector<int> &shape, const TypeId data_type,
46                                  const Format format, const std::string &name);
47   void Free(void *ptr) override;
48   int RefCount(void *ptr) override;
49   int SetRefCount(void *ptr, int ref_count) override;
50   int DecRefCount(void *ptr, int ref_count) override;
51   int IncRefCount(void *ptr, int ref_count) override;
GetNNTensor(void * ptr)52   NN_Tensor *GetNNTensor(void *ptr) {
53     std::lock_guard<std::mutex> locker(mutex_);
54     auto iter = allocated_list_.find(ptr);
55     if (iter != allocated_list_.end()) {
56       return iter->second->tensor_;
57     }
58     return nullptr;
59   }
SetDeviceId(size_t id)60   void SetDeviceId(size_t id) { device_id_ = id; }
61   void ClearFreeList();
62   void FreeAllocatedTensor(void *data, lite::Tensor *tensor);
AddAllocatedLiteTensor(void * data,lite::Tensor * tensor)63   void AddAllocatedLiteTensor(void *data, lite::Tensor *tensor) {
64     if (data == nullptr) {
65       return;
66     }
67     std::lock_guard<std::mutex> locker(mutex_);
68     allocated_lite_tensors_[data] = tensor;
69   }
70 
71  private:
72   struct MemBuf {
73     std::atomic_int ref_count_{0};
74     NN_TensorDesc *tensor_desc_{nullptr};
75     NN_Tensor *tensor_{nullptr};
76     void *data{nullptr};
77     size_t size{0};
78   };
79 
80   size_t device_id_{0};
81   OH_NNExecutor *executor_{nullptr};
82   std::mutex mutex_;
83   // <membuf->memory_->data, membuf>
84   std::unordered_map<void *, MemBuf *> allocated_list_;
85   std::multimap<size_t, MemBuf *> free_list_;
86   std::unordered_map<void *, lite::Tensor *> allocated_lite_tensors_;
87 };
88 
89 }  // namespace lite
90 }  // namespace mindspore
91 
92 #endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NNRT_NNRT_ALLOCATOR_H_
93