/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

16 #include "tensorflow/lite/delegates/gpu/cl/buffer.h"
17
18 #include <string>
19
20 #include "tensorflow/lite/delegates/gpu/common/data_type.h"
21 #include "tensorflow/lite/delegates/gpu/common/status.h"
22
namespace tflite {
namespace gpu {
namespace cl {
namespace {

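// Allocates a cl_mem of `size_in_bytes` (optionally GPU-read-only and
// optionally initialized from `data`) and wraps it in `result`, which takes
// ownership of the handle.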
absl::Status CreateBuffer(size_t size_in_bytes, bool gpu_read_only,
                          const void* data, CLContext* context,
                          Buffer* result) {
  cl_mem buffer;
  RETURN_IF_ERROR(CreateCLBuffer(context->context(), size_in_bytes,
                                 gpu_read_only, const_cast<void*>(data),
                                 &buffer));
  *result = Buffer(buffer, size_in_bytes);

  return absl::OkStatus();
}
}  // namespace

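// Wraps an already created cl_mem handle; the Buffer assumes ownership and
// frees the handle via Release().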
Buffer::Buffer(cl_mem buffer, size_t size_in_bytes)
    : buffer_(buffer), size_(size_in_bytes) {}

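// Move constructor: transfers ownership of the cl_mem handle and leaves the
// source Buffer empty.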
Buffer(Buffer && buffer)44 Buffer::Buffer(Buffer&& buffer) : buffer_(buffer.buffer_), size_(buffer.size_) {
45 buffer.buffer_ = nullptr;
46 buffer.size_ = 0;
47 }
48
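// Move assignment: releases the currently held buffer (if any), then takes
// over the other Buffer's handle and size.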
Buffer& Buffer::operator=(Buffer&& buffer) {
  if (this != &buffer) {
    Release();
    std::swap(size_, buffer.size_);
    std::swap(buffer_, buffer.buffer_);
  }
  return *this;
}

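// Releases the underlying cl_mem object, if present, and resets the size.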
void Buffer::Release() {
  if (buffer_) {
    clReleaseMemObject(buffer_);
    buffer_ = nullptr;
    size_ = 0;
  }
}

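// Registers the cl_mem in `resources` under the name "buffer"; fails if
// `obj_ptr` is not a BufferDescriptor.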
absl::Status Buffer::GetGPUResources(const GPUObjectDescriptor* obj_ptr,
                                     GPUResourcesWithValue* resources) const {
  const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(obj_ptr);
  if (!buffer_desc) {
    return absl::InvalidArgumentError("Expected BufferDescriptor on input.");
  }

  resources->buffers.push_back({"buffer", buffer_});
  return absl::OkStatus();
}

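// Allocates a buffer of desc.size bytes, marked GPU-read-only when the
// descriptor's memory type is CONSTANT, and optionally initialized from
// desc.data.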
absl::Status Buffer::CreateFromBufferDescriptor(const BufferDescriptor& desc,
                                                CLContext* context) {
  bool read_only = desc.memory_type == MemoryType::CONSTANT;
  uint8_t* data_ptr = desc.data.empty()
                          ? nullptr
                          : const_cast<unsigned char*>(desc.data.data());
  size_ = desc.size;
  return CreateCLBuffer(context->context(), desc.size, read_only, data_ptr,
                        &buffer_);
}

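// Creates a GPU-read-only buffer of `size_in_bytes` with no initial data.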
absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
                                  Buffer* result) {
  return CreateBuffer(size_in_bytes, true, nullptr, context, result);
}

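// Creates a GPU-read-only buffer of `size_in_bytes` initialized from `data`.
//
// Usage sketch (assumes an initialized CLContext named `context`; the vector
// name is illustrative):
//
//   std::vector<float> weights = ...;
//   Buffer buf;
//   RETURN_IF_ERROR(CreateReadOnlyBuffer(weights.size() * sizeof(float),
//                                        weights.data(), &context, &buf));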
absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
                                  CLContext* context, Buffer* result) {
  return CreateBuffer(size_in_bytes, true, data, context, result);
}

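// Creates a buffer of `size_in_bytes` that the GPU can both read and write.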
absl::Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
                                   Buffer* result) {
  return CreateBuffer(size_in_bytes, false, nullptr, context, result);
}

}  // namespace cl
}  // namespace gpu
}  // namespace tflite