/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_TENSORS_QUEUE_H_
18 #define MINDSPORE_CCSRC_RUNTIME_DEVICE_TENSORS_QUEUE_H_
19 
20 #include <vector>
21 #include <queue>
22 #include <string>
23 #include <memory>
24 #include "include/backend/kernel_graph.h"
25 #include "include/backend/anf_runtime_algorithm.h"
26 #include "include/common/utils/anfalgo.h"
27 #include "kernel/kernel.h"
28 #include "include/backend/mem_reuse/mem_dynamic_allocator.h"
29 
30 namespace mindspore {
31 namespace device {
32 class BACKEND_EXPORT TensorsQueue {
33  public:
34   // Base TensorsQueue. Constructed by name, dtype, size, elements_num and shapes.
TensorsQueue(const string & name,const TypePtr & dtype,const int64_t size,const int64_t elements_num,const std::vector<std::vector<int64_t>> & shapes)35   TensorsQueue(const string &name, const TypePtr &dtype, const int64_t size, const int64_t elements_num,
36                const std::vector<std::vector<int64_t>> &shapes)
37       : name_(name), dtype_(dtype), shapes_(shapes), size_(size), elements_num_(elements_num) {}
38   virtual ~TensorsQueue() = default;
39   virtual void CreateTensorsQueue();
40 
41   // These three function (FreeMemory, AllocateMemory and ClearMemory) are related with devices.
42   // These should be achieved with different devices.
43   virtual void FreeMemory(const DeviceMemPtr addr) = 0;
44   virtual void *AllocateMemory(const size_t size) = 0;
45   virtual void ClearMemory(void *addr, const size_t size) = 0;
46 
47   // When memory operations are involved, we need to determine whether to use streams according to the device.
48   virtual bool Put(const mindspore::kernel::AddressPtrList &dev_addr);
49   virtual bool Put(const mindspore::kernel::AddressPtrList &dev_addr, void *stream);
50   virtual void CopyTensor(const mindspore::kernel::AddressPtr &dst, const mindspore::kernel::AddressPtr &src);
51   virtual void CopyTensor(const mindspore::kernel::AddressPtr &dst, const mindspore::kernel::AddressPtr &src,
52                           void *stream);
53   virtual bool Get(const mindspore::kernel::AddressPtrList &dev_addr, const bool &pop_after_get);
54   virtual bool Get(const mindspore::kernel::AddressPtrList &dev_addr, const bool &pop_after_get, void *stream);
55 
56   // Common functions for TensorsQueue which are device independent.
57   virtual void Clear();
58   virtual void Free();
59   virtual size_t AvailableSize();
60   virtual bool IsFull();
61   virtual bool IsEmpty();
62 
63  protected:
64   std::string name_;
65   TypePtr dtype_;
66   std::vector<std::vector<int64_t>> shapes_;
67   int64_t size_;
68   int64_t elements_num_;
69 
70  private:
71   // Using a vector of address list to store the tensors.
72   // Using to cursors to simulate the behavior of circular queue.
73   std::vector<mindspore::kernel::AddressPtrList> tensors_q_;
74   size_t front_ = 0;
75   size_t rear_ = 0;
76 };
77 using TensorsQueuePtr = std::shared_ptr<TensorsQueue>;
78 }  // namespace device
79 }  // namespace mindspore
80 
81 #endif  // MINDSPORE_CCSRC_RUNTIME_DEVICE_TENSORS_QUEUE_H_
82