• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright 2019-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_GPU_GPU_DEVICE_ADDRESS_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_GPU_GPU_DEVICE_ADDRESS_H_

#include <string>
#include <vector>
#include "include/backend/device_address.h"
#include "runtime/device/loadable_device_address.h"
24 
25 using ShapeVecotr = std::vector<int>;
26 
27 namespace mindspore {
28 #ifdef ENABLE_DEBUGGER
29 class Debugger;
30 #endif
31 namespace device {
32 namespace gpu {
33 class GPUDeviceAddress : public LoadableDeviceAddress {
34  public:
GPUDeviceAddress(const KernelTensorPtr & kernel_tensor)35   explicit GPUDeviceAddress(const KernelTensorPtr &kernel_tensor) : LoadableDeviceAddress(kernel_tensor) {
36     SetDevicePtrDeleter();
37   }
GPUDeviceAddress(void * ptr,size_t size)38   GPUDeviceAddress(void *ptr, size_t size) : LoadableDeviceAddress(ptr, size) { SetDevicePtrDeleter(); }
GPUDeviceAddress(void * ptr,size_t size,const string & format,TypeId type_id)39   GPUDeviceAddress(void *ptr, size_t size, const string &format, TypeId type_id)
40       : LoadableDeviceAddress(ptr, size, format, type_id) {
41     SetDevicePtrDeleter();
42   }
GPUDeviceAddress(void * ptr,size_t size,const std::string & format,TypeId type_id,const KernelWithIndex & node_index)43   GPUDeviceAddress(void *ptr, size_t size, const std::string &format, TypeId type_id, const KernelWithIndex &node_index)
44       : LoadableDeviceAddress(ptr, size, format, type_id, node_index) {
45     SetDevicePtrDeleter();
46   }
GPUDeviceAddress(void * ptr,size_t size,const std::string & format,TypeId type_id,const std::string & device_name,uint32_t device_id)47   GPUDeviceAddress(void *ptr, size_t size, const std::string &format, TypeId type_id, const std::string &device_name,
48                    uint32_t device_id)
49       : LoadableDeviceAddress(ptr, size, format, type_id, device_name, device_id) {
50     SetDevicePtrDeleter();
51   }
GPUDeviceAddress(void * ptr,size_t size,const ShapeVector & shape_vector,const Format & format,TypeId type_id,const std::string & device_name,uint32_t device_id,uint32_t stream_id)52   GPUDeviceAddress(void *ptr, size_t size, const ShapeVector &shape_vector, const Format &format, TypeId type_id,
53                    const std::string &device_name, uint32_t device_id, uint32_t stream_id)
54       : LoadableDeviceAddress(ptr, size, shape_vector, format, type_id, device_name, device_id, stream_id) {
55     SetDevicePtrDeleter();
56   }
57   ~GPUDeviceAddress() override;
58   void DeviceSynchronizerInit() override;
59 
60   bool SyncDeviceToHost(size_t size, void *host_ptr) const override;
61   bool SyncHostToDevice(size_t size, const void *host_ptr) const override;
62   bool SyncDeviceToHost(const ShapeVector &shape, size_t size, TypeId type, void *host_ptr) const override;
63   bool SyncHostToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *host_ptr,
64                         const std::string &format) const override;
65   bool SyncDeviceToDevice(const DeviceSync *src_device_addr) const override;
66   bool SyncDeviceToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *src_ptr,
67                           const std::string &format) const override;
68   bool CopyDeviceToHost(void *dst, const void *src, const size_t &size) const override;
69   bool CopyHostToDevice(void *dst, const void *src, const size_t &size) const override;
70 
71   void ClearDeviceMemory() override;
GetDeviceType()72   DeviceType GetDeviceType() const override { return DeviceType::kGPU; }
73 
74 #ifdef ENABLE_DEBUGGER
75   bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
76                      const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
77                      uint32_t root_graph_id, bool force_update, bool trans_flag, bool async_copy = true) const override;
78 #endif
79 
80   // Asynchronously copy host memory to device side.
81   bool AsyncHostToDevice(const ShapeVector &, size_t size, TypeId, const void *host_ptr,
82                          size_t stream_id) const override;
83 
84   // Asynchronously copy device memory to host side.
85   bool AsyncDeviceToHost(const ShapeVector &, size_t size, TypeId, void *host_ptr, size_t stream_id) const override;
86 
87   bool AsyncHostToDevice(size_t size, const void *host_ptr) const override;
88 
89   bool AsyncDeviceToHost(size_t size, void *host_ptr) const override;
90 
91   void ClearUserData() override;
92 
93  protected:
94   bool CopyDeviceToHost(void *dst, const void *src, size_t size, bool async, size_t stream_id) const override;
95   bool CopyHostToDevice(void *dst, const void *src, size_t size, bool async, size_t stream_id) const override;
96 
97  private:
98   bool CopyBetweenHostDevice(void *dst, const void *src, size_t size, bool async, size_t stream_id,
99                              bool host_to_device) const;
100 
101   // Set a device pointer destructor to kernel tensor, used to release resource reclaiming of the device pointer
102   // automatically when DeviceAddress destructed.
103   void SetDevicePtrDeleter();
104 };
105 }  // namespace gpu
106 }  // namespace device
107 }  // namespace mindspore
108 
109 #endif  // MINDSPORE_CCSRC_RUNTIME_DEVICE_GPU_GPU_DEVICE_ADDRESS_H_
110