/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "DMABUFHEAPS"

#include <BufferAllocator/BufferAllocator.h>

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <ion/ion.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/ion_4.12.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_set>

#include <android-base/logging.h>
#include <android-base/unique_fd.h>

static constexpr char kDmaHeapRoot[] = "/dev/dma_heap/";
static constexpr char kIonDevice[] = "/dev/ion";
static constexpr char kIonSystemHeapName[] = "ion_system_heap";

void BufferAllocator::LogInterface(const std::string& interface) {
    if (!logged_interface_) {
        LOG(INFO) << "Using : " << interface;
        logged_interface_ = true;
    }
}

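/*
 * Returns a cached fd for @heap_name if the heap was opened before; otherwise
 * opens /dev/dma_heap/<heap_name> and caches the fd. A shared lock covers the
 * common lookup path; the lock is upgraded to a unique lock (with a re-check)
 * only when the cache must be modified.
 */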
int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) {
    std::shared_lock<std::shared_mutex> slock(dmabuf_heap_fd_mutex_);

    /* Check if heap has already been opened. */
    auto it = dmabuf_heap_fds_.find(heap_name);
    if (it != dmabuf_heap_fds_.end())
        return it->second;

    slock.unlock();

    /*
     * Heap device needs to be opened, use a unique_lock since dmabuf_heap_fds_
     * needs to be modified.
     */
    std::unique_lock<std::shared_mutex> ulock(dmabuf_heap_fd_mutex_);

    /*
     * Re-check the map in case a racing thread opened the heap device while
     * the shared lock was dropped.
     */
    it = dmabuf_heap_fds_.find(heap_name);
    if (it != dmabuf_heap_fds_.end()) return it->second;

    std::string heap_path = kDmaHeapRoot + heap_name;
    int fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDONLY | O_CLOEXEC));
    if (fd < 0) return -errno;

    LOG(INFO) << "Using DMA-BUF heap named: " << heap_name;

    auto ret = dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)});
    CHECK(ret.second);
    return fd;
}

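/*
 * Detects whether the device exposes the legacy or the non-legacy ION
 * interface and pre-populates the name-to-config map for the system (cached)
 * and system-uncached heaps accordingly.
 */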
void BufferAllocator::QueryIonHeaps() {
    uses_legacy_ion_iface_ = ion_is_legacy(ion_fd_);
    if (uses_legacy_ion_iface_) {
        LogInterface("Legacy ion heaps");
        MapNameToIonMask(kDmabufSystemHeapName, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
        MapNameToIonMask(kDmabufSystemUncachedHeapName, ION_HEAP_SYSTEM_MASK);
        return;
    }

    int heap_count;
    int ret = ion_query_heap_cnt(ion_fd_, &heap_count);
    if (ret == 0) {
        ion_heap_info_.resize(heap_count, {});
        ret = ion_query_get_heaps(ion_fd_, heap_count, ion_heap_info_.data());
    }

    // Abort if the heap query fails.
    CHECK(ret == 0)
            << "Non-legacy ION implementation must support heap information queries";
    LogInterface("Non-legacy ION heaps");

    /*
     * No error checking here; devices may have used another name for the ion
     * system heap.
     */
    MapNameToIonName(kDmabufSystemHeapName, kIonSystemHeapName, ION_FLAG_CACHED);
    MapNameToIonName(kDmabufSystemUncachedHeapName, kIonSystemHeapName);
}

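/*
 * The constructor only probes ION: /dev/ion is opened if present and its heaps
 * are queried. DMA-BUF heap fds are opened lazily, on first use.
 */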
BufferAllocator::BufferAllocator() {
    ion_fd_.reset(TEMP_FAILURE_RETRY(open(kIonDevice, O_RDONLY | O_CLOEXEC)));
    if (ion_fd_ >= 0)
        QueryIonHeaps();
}

int BufferAllocator::MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask,
                                      unsigned int ion_heap_flags) {
    if (!ion_heap_mask)
        return -EINVAL;
    IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };

    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
    heap_name_to_config_[heap_name] = heap_config;
    return 0;
}

int BufferAllocator::GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id) {
    for (auto& it : ion_heap_info_) {
        if (heap_name == it.name) {
            *heap_id = it.heap_id;
            return 0;
        }
    }

    LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
    return -EINVAL;
}

int BufferAllocator::MapNameToIonName(const std::string& heap_name,
                                      const std::string& ion_heap_name,
                                      unsigned int ion_heap_flags) {
    unsigned int ion_heap_id = 0;
    auto ret = GetIonHeapIdByName(ion_heap_name, &ion_heap_id);
    if (ret < 0)
        return ret;

    unsigned int ion_heap_mask = 1 << ion_heap_id;
    IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };

    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
    heap_name_to_config_[heap_name] = heap_config;

    return 0;
}

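/*
 * Associates a DMA-BUF heap name with its ION fallback. If the DMA-BUF heap
 * itself exists, no ION mapping is recorded; otherwise the legacy mask/flags or
 * the non-legacy ION heap name is stored, depending on the detected interface.
 */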
int BufferAllocator::MapNameToIonHeap(const std::string& heap_name,
                                      const std::string& ion_heap_name,
                                      unsigned int ion_heap_flags,
                                      unsigned int legacy_ion_heap_mask,
                                      unsigned int legacy_ion_heap_flags) {
    /* if the DMA-BUF Heap exists, we can ignore ion mappings */
    int ret = OpenDmabufHeap(heap_name);
    if (ret >= 0)
        return 0;

    /* If ION support is not detected, ignore the mappings */
    if (ion_fd_ < 0) return 0;

    if (uses_legacy_ion_iface_ || ion_heap_name == "") {
        ret = MapNameToIonMask(heap_name, legacy_ion_heap_mask, legacy_ion_heap_flags);
    } else if (!ion_heap_name.empty()) {
        ret = MapNameToIonName(heap_name, ion_heap_name, ion_heap_flags);
    }

    return ret;
}

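/*
 * Looks up the ION heap mask/flags registered for @heap_name. If the name is
 * not in the map and the non-legacy interface is in use, the ION heap id is
 * resolved by name and the result is cached for later lookups.
 */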
int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) {
    int ret = 0;

    std::shared_lock<std::shared_mutex> slock(heap_name_to_config_mutex_);

    auto it = heap_name_to_config_.find(heap_name);
    if (it != heap_name_to_config_.end()) {
        heap_config = it->second;
        return ret;
    }

    slock.unlock();

    if (uses_legacy_ion_iface_) {
        ret = -EINVAL;
    } else {
        unsigned int heap_id;
        ret = GetIonHeapIdByName(heap_name, &heap_id);
        if (ret == 0) {
            heap_config.mask = 1 << heap_id;
            heap_config.flags = 0;
            /* save it so that this lookup does not need to happen again */
            std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
            heap_name_to_config_[heap_name] = heap_config;
        }
    }

    if (ret)
        LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
    return ret;
}

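/*
 * Allocates @len bytes from the named DMA-BUF heap via DMA_HEAP_IOCTL_ALLOC and
 * returns the new buffer fd (or a negative error). On success, the buffer is
 * also labeled with the heap name via DmabufSetName() so it is easier to
 * identify in dmabuf accounting output.
 */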
int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len) {
    int fd = OpenDmabufHeap(heap_name);
    if (fd < 0) return fd;

    struct dma_heap_allocation_data heap_data{
        .len = len,  // length of data to be allocated in bytes
        .fd_flags = O_RDWR | O_CLOEXEC,  // permissions for the memory to be allocated
    };

    auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data));
    if (ret < 0) {
        PLOG(ERROR) << "Unable to allocate from DMA-BUF heap: " << heap_name;
        return ret;
    }

    if (heap_data.fd >= 0) {
        if (DmabufSetName(heap_data.fd, heap_name))
            PLOG(WARNING) << "Unable to name DMA buffer for: " << heap_name;
    }

    return heap_data.fd;
}

int BufferAllocator::DmabufSetName(unsigned int dmabuf_fd, const std::string& name) {
    /* dma_buf_set_name truncates instead of returning an error */
    if (name.length() > DMA_BUF_NAME_LEN) {
        errno = ENAMETOOLONG;
        return -1;
    }

    return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, name.c_str()));
}

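/*
 * Allocates @len bytes through libion using the heap mask/flags resolved by
 * GetIonConfig(), OR-ing in any caller-supplied heap flags. Returns the buffer
 * fd on success or a negative error code.
 */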
int BufferAllocator::IonAlloc(const std::string& heap_name, size_t len,
                              unsigned int heap_flags, size_t legacy_align) {
    IonHeapConfig heap_config;
    auto ret = GetIonConfig(heap_name, heap_config);
    if (ret)
        return ret;

    int alloc_fd = -1;
    unsigned int flags = heap_config.flags | heap_flags;
    ret = ion_alloc_fd(ion_fd_, len, legacy_align, heap_config.mask, flags, &alloc_fd);
    if (ret) {
        PLOG(ERROR) << "allocation fails for ion heap with mask: " << heap_config.mask
                    << " and flags: " << flags;
        return ret;
    }
    return alloc_fd;
}

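/*
 * Primary allocation entry point: tries the DMA-BUF heap first and falls back
 * to ION (with the legacy flags/alignment) when the DMA-BUF allocation fails.
 */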
int BufferAllocator::Alloc(const std::string& heap_name, size_t len,
                           unsigned int heap_flags, size_t legacy_align) {
    int fd = DmabufAlloc(heap_name, len);

    if (fd < 0)
        fd = IonAlloc(heap_name, len, heap_flags, legacy_align);

    return fd;
}

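/*
 * Allocates from the system heap. When the CPU does not need to touch the
 * buffer, the "system-uncached" heap is preferred (DMA-BUF first, then ION);
 * support for it is probed once per process and cached in function-local
 * statics. Otherwise, or when no uncached heap exists, the cached system heap
 * is used.
 */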
int BufferAllocator::AllocSystem(bool cpu_access_needed, size_t len, unsigned int heap_flags,
                                 size_t legacy_align) {
    if (!cpu_access_needed) {
        /*
         * CPU does not need to access allocated buffer so we try to allocate in
         * the 'system-uncached' heap after querying for its existence.
         */
        static bool uncached_dmabuf_system_heap_support = [this]() -> bool {
            auto dmabuf_heap_list = this->GetDmabufHeapList();
            return (dmabuf_heap_list.find(kDmabufSystemUncachedHeapName) != dmabuf_heap_list.end());
        }();

        if (uncached_dmabuf_system_heap_support)
            return DmabufAlloc(kDmabufSystemUncachedHeapName, len);

        static bool uncached_ion_system_heap_support = [this]() -> bool {
            IonHeapConfig heap_config;
            auto ret = this->GetIonConfig(kDmabufSystemUncachedHeapName, heap_config);
            return (ret == 0);
        }();

        if (uncached_ion_system_heap_support)
            return IonAlloc(kDmabufSystemUncachedHeapName, len, heap_flags, legacy_align);
    }

    /*
     * Either 1) CPU needs to access allocated buffer OR 2) CPU does not need to
     * access allocated buffer but the "system-uncached" heap is unsupported.
     */
    return Alloc(kDmabufSystemHeapName, len, heap_flags, legacy_align);
}

int BufferAllocator::LegacyIonCpuSync(unsigned int dmabuf_fd,
                                      const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                      void *legacy_ion_custom_data) {
    if (!legacy_ion_cpu_sync_custom)
        return ion_sync_fd(ion_fd_, dmabuf_fd);

    // dup ion_fd_ so that we retain its ownership.
    int new_ion_fd = TEMP_FAILURE_RETRY(dup(ion_fd_.get()));
    if (new_ion_fd < 0) {
        PLOG(ERROR) << "Unable to dup ion fd. error: " << new_ion_fd;
        return new_ion_fd;
    }

    int ret = legacy_ion_cpu_sync_custom(new_ion_fd, dmabuf_fd, legacy_ion_custom_data);

    close(new_ion_fd);
    return ret;
}

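/*
 * Performs the begin/end CPU access sync. On legacy ION devices this goes
 * through ion_sync_fd() (or a caller-provided callback); otherwise it issues
 * DMA_BUF_IOCTL_SYNC with DMA_BUF_SYNC_START or DMA_BUF_SYNC_END combined with
 * the requested sync direction.
 */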
int BufferAllocator::DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type,
                            const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                            void *legacy_ion_custom_data) {
    if (uses_legacy_ion_iface_) {
        return LegacyIonCpuSync(dmabuf_fd, legacy_ion_cpu_sync_custom,
                                legacy_ion_custom_data);
    }

    struct dma_buf_sync sync = {
        .flags = (start ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) |
                static_cast<uint64_t>(sync_type),
    };
    return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync));
}

int BufferAllocator::CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type,
                                  const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                  void *legacy_ion_custom_data) {
    int ret = DoSync(dmabuf_fd, true, sync_type, legacy_ion_cpu_sync_custom,
                     legacy_ion_custom_data);

    if (ret) PLOG(ERROR) << "CpuSyncStart() failure";
    return ret;
}

int BufferAllocator::CpuSyncEnd(unsigned int dmabuf_fd, SyncType sync_type,
                                const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                void* legacy_ion_custom_data) {
    int ret = DoSync(dmabuf_fd, false, sync_type, legacy_ion_cpu_sync_custom,
                     legacy_ion_custom_data);
    if (ret) PLOG(ERROR) << "CpuSyncEnd() failure";

    return ret;
}

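/*
 * Returns the set of DMA-BUF heap names currently exposed under
 * /dev/dma_heap/, skipping the "." and ".." directory entries. An empty set is
 * returned if the directory cannot be opened.
 */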
std::unordered_set<std::string> BufferAllocator::GetDmabufHeapList() {
    std::unordered_set<std::string> heap_list;
    std::unique_ptr<DIR, int (*)(DIR*)> dir(opendir(kDmaHeapRoot), closedir);

    if (dir) {
        struct dirent* dent;
        while ((dent = readdir(dir.get()))) {
            if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) continue;

            heap_list.insert(dent->d_name);
        }
    }

    return heap_list;
}

bool BufferAllocator::CheckIonSupport() {
    static bool ion_support = (access(kIonDevice, R_OK) == 0);

    return ion_support;
}
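
/*
 * Illustrative usage sketch (not part of this file): a minimal client that
 * allocates a CPU-visible buffer from the system heap, maps it, and writes to
 * it between CPU sync begin/end calls. The buffer size, the kSyncWrite sync
 * type, and the short Alloc()/CpuSyncStart()/CpuSyncEnd() argument lists are
 * assumptions that rely on the defaults declared in the public header.
 *
 *     #include <BufferAllocator/BufferAllocator.h>
 *     #include <errno.h>
 *     #include <string.h>
 *     #include <sys/mman.h>
 *     #include <unistd.h>
 *
 *     int ExampleFill() {
 *         BufferAllocator allocator;
 *         const size_t kLen = 4096;  // assumed example size
 *         int fd = allocator.Alloc(kDmabufSystemHeapName, kLen);
 *         if (fd < 0) return fd;
 *
 *         void* addr = mmap(nullptr, kLen, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *         if (addr == MAP_FAILED) { close(fd); return -errno; }
 *
 *         allocator.CpuSyncStart(fd, kSyncWrite);   // begin CPU write access
 *         memset(addr, 0, kLen);
 *         allocator.CpuSyncEnd(fd, kSyncWrite);     // end CPU write access
 *
 *         munmap(addr, kLen);
 *         close(fd);
 *         return 0;
 *     }
 */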