/*
 * ion.c
 *
 * Memory Allocator functions for ion
 *
 * Copyright 2011 Google, Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define LOG_TAG "ion"

#include <errno.h>
#include <fcntl.h>
#include <linux/ion.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include <ion/ion.h>
#include <linux/ion_4.19.h>

#include <log/log.h>

#define ION_ABI_VERSION_MODULAR_HEAPS 2

enum ion_version { ION_VERSION_UNKNOWN, ION_VERSION_MODERN, ION_VERSION_LEGACY };

static atomic_int g_ion_version = ATOMIC_VAR_INIT(ION_VERSION_UNKNOWN);

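/*
 * Returns nonzero if the kernel exposes the legacy (handle-based) ION ABI.
 * The result is probed once via the FREE ioctl and cached in g_ion_version.
 */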
int ion_is_legacy(int fd) {
    int version = atomic_load_explicit(&g_ion_version, memory_order_acquire);
    if (version == ION_VERSION_UNKNOWN) {
        /**
         * Check for FREE IOCTL here; it is available only in the old
         * kernels, not the new ones.
         */
        int err = ion_free(fd, (ion_user_handle_t)0);
        version = (err == -ENOTTY) ? ION_VERSION_MODERN : ION_VERSION_LEGACY;
        atomic_store_explicit(&g_ion_version, version, memory_order_release);
    }
    return version == ION_VERSION_LEGACY;
}

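/* Opens /dev/ion read-only with CLOEXEC; logs and returns -1 if the open fails. */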
int ion_open() {
    int fd = open("/dev/ion", O_RDONLY | O_CLOEXEC);
    if (fd < 0) ALOGE("open /dev/ion failed: %s", strerror(errno));

    return fd;
}

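/* Closes an fd from ion_open(); returns 0 on success or -errno on failure. */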
int ion_close(int fd) {
    int ret = close(fd);
    if (ret < 0) return -errno;
    return ret;
}

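/*
 * Thin wrapper around ioctl() that returns -errno on failure and logs the
 * error, except for ENOTTY, which is expected when probing unsupported ioctls.
 */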
static int ion_ioctl(int fd, int req, void* arg) {
    int ret = ioctl(fd, req, arg);
    if (ret < 0) {
        /*
         * To avoid confusion, do not log an error when the failure comes from
         * an unimplemented IOCTL. Several IOCTLs exist only on certain ION
         * versions and are issued here solely to probe which version the
         * device supports.
         */
        if (errno != ENOTTY)
            ALOGE("ioctl %x failed with code %d: %s", req, ret, strerror(errno));
        return -errno;
    }
    return ret;
}

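/* Returns nonzero if the driver reports an ABI version with modular heap support. */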
int ion_is_using_modular_heaps(int fd) {
    int ion_abi_version = 0;
    int ret = 0;

    ret = ion_ioctl(fd, ION_IOC_ABI_VERSION, &ion_abi_version);
    return (ret == 0 && ion_abi_version >= ION_ABI_VERSION_MODULAR_HEAPS);
}

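/*
 * Legacy-only allocation: returns an ion_user_handle_t via *handle.
 * Fails with -EINVAL on kernels that use the modern (fd-based) ABI.
 */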
int ion_alloc(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
              ion_user_handle_t* handle) {
    int ret = 0;

    if ((handle == NULL) || (!ion_is_legacy(fd))) return -EINVAL;

    struct ion_allocation_data data = {
            .len = len, .align = align, .heap_id_mask = heap_mask, .flags = flags,
    };

    ret = ion_ioctl(fd, ION_IOC_ALLOC, &data);
    if (ret < 0) return ret;

    *handle = data.handle;

    return ret;
}

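/* Releases a legacy handle. Also used by ion_is_legacy() to probe the ABI. */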
int ion_free(int fd, ion_user_handle_t handle) {
    struct ion_handle_data data = {
            .handle = handle,
    };
    return ion_ioctl(fd, ION_IOC_FREE, &data);
}

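/*
 * Legacy-only: obtains a dma-buf fd for the handle via ION_IOC_MAP, mmaps it,
 * and returns both the mapping (*ptr) and the fd (*map_fd).
 */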
int ion_map(int fd, ion_user_handle_t handle, size_t length, int prot, int flags, off_t offset,
            unsigned char** ptr, int* map_fd) {
    if (!ion_is_legacy(fd)) return -EINVAL;
    int ret;
    unsigned char* tmp_ptr;
    struct ion_fd_data data = {
            .handle = handle,
    };

    if (map_fd == NULL) return -EINVAL;
    if (ptr == NULL) return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_MAP, &data);
    if (ret < 0) return ret;
    if (data.fd < 0) {
        ALOGE("map ioctl returned negative fd");
        return -EINVAL;
    }
    tmp_ptr = mmap(NULL, length, prot, flags, data.fd, offset);
    if (tmp_ptr == MAP_FAILED) {
        ALOGE("mmap failed: %s", strerror(errno));
        return -errno;
    }
    *map_fd = data.fd;
    *ptr = tmp_ptr;
    return ret;
}

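/* Legacy-only: obtains a shareable dma-buf fd for the handle via ION_IOC_SHARE. */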
int ion_share(int fd, ion_user_handle_t handle, int* share_fd) {
    int ret;
    struct ion_fd_data data = {
            .handle = handle,
    };

    if (!ion_is_legacy(fd)) return -EINVAL;
    if (share_fd == NULL) return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_SHARE, &data);
    if (ret < 0) return ret;
    if (data.fd < 0) {
        ALOGE("share ioctl returned negative fd");
        return -EINVAL;
    }
    *share_fd = data.fd;
    return ret;
}

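/*
 * Allocates a buffer and returns it as a buffer fd. Works on both ABIs: the
 * modern path allocates the fd directly, while the legacy path allocates a
 * handle, shares it, and then frees the handle.
 */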
int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
                 int* handle_fd) {
    ion_user_handle_t handle;
    int ret;

    if (!handle_fd) return -EINVAL;

    if (!ion_is_legacy(fd)) {
        struct ion_new_allocation_data data = {
                .len = len,
                .heap_id_mask = heap_mask,
                .flags = flags,
        };

        ret = ion_ioctl(fd, ION_IOC_NEW_ALLOC, &data);
        if (ret < 0) return ret;
        *handle_fd = data.fd;
    } else {
        ret = ion_alloc(fd, len, align, heap_mask, flags, &handle);
        if (ret < 0) return ret;
        ret = ion_share(fd, handle, handle_fd);
        ion_free(fd, handle);
    }
    return ret;
}

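/* Legacy-only: converts a shared buffer fd back into an ion handle. */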
int ion_import(int fd, int share_fd, ion_user_handle_t* handle) {
    int ret;
    struct ion_fd_data data = {
            .fd = share_fd,
    };

    if (!ion_is_legacy(fd)) return -EINVAL;

    if (handle == NULL) return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_IMPORT, &data);
    if (ret < 0) return ret;
    *handle = data.handle;
    return ret;
}

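/* Legacy-only: asks the kernel to sync the caches of the buffer backing handle_fd. */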
int ion_sync_fd(int fd, int handle_fd) {
    struct ion_fd_data data = {
            .fd = handle_fd,
    };

    if (!ion_is_legacy(fd)) return -EINVAL;

    return ion_ioctl(fd, ION_IOC_SYNC, &data);
}

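/* Queries the number of ION heaps; the count is returned via *cnt. */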
int ion_query_heap_cnt(int fd, int* cnt) {
    int ret;
    struct ion_heap_query query;

    if (!cnt) return -EINVAL;
    memset(&query, 0, sizeof(query));

    ret = ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
    if (ret < 0) return ret;

    *cnt = query.cnt;
    return ret;
}

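/* Fills 'buffers' with cnt heap records describing the available ION heaps. */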
int ion_query_get_heaps(int fd, int cnt, void* buffers) {
    int ret;
    struct ion_heap_query query = {
            .cnt = cnt, .heaps = (uintptr_t)buffers,
    };

    ret = ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
    return ret;
}