/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0

#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include <sys/mman.h>

#include <cutils/log.h>
#include <cutils/ashmem.h>

#include "gralloc_priv.h"
#include "pmemalloc.h"


#define BEGIN_FUNC ALOGV("%s begin", __PRETTY_FUNCTION__)
#define END_FUNC ALOGV("%s end", __PRETTY_FUNCTION__)

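// Derive open(2) flags for the pmem device from the gralloc usage bits:
// O_SYNC is dropped for buffers that software reads or writes often
// (pmem traditionally maps O_SYNC opens uncached, so frequently touched
// buffers avoid it), and kept for everything else.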
static int get_open_flags(int usage) {
    int openFlags = O_RDWR | O_SYNC;
    uint32_t uread = usage & GRALLOC_USAGE_SW_READ_MASK;
    uint32_t uwrite = usage & GRALLOC_USAGE_SW_WRITE_MASK;
    if (uread == GRALLOC_USAGE_SW_READ_OFTEN ||
        uwrite == GRALLOC_USAGE_SW_WRITE_OFTEN) {
        openFlags &= ~O_SYNC;
    }
    return openFlags;
}

PmemAllocator::~PmemAllocator()
{
    BEGIN_FUNC;
    END_FUNC;
}


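// PmemUserspaceAllocator manages a single "master" pmem heap: the device is
// opened and mmap()ed once, individual buffers are carved out of that mapping
// by the Deps::Allocator, and each buffer is exposed to clients as a sub-heap
// fd connected to the master fd.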
PmemUserspaceAllocator::PmemUserspaceAllocator(Deps& deps, Deps::Allocator& allocator, const char* pmemdev):
    deps(deps),
    allocator(allocator),
    pmemdev(pmemdev),
    master_fd(MASTER_FD_INIT)
{
    BEGIN_FUNC;
    pthread_mutex_init(&lock, NULL);
    END_FUNC;
}


PmemUserspaceAllocator::~PmemUserspaceAllocator()
{
    BEGIN_FUNC;
    END_FUNC;
}


void* PmemUserspaceAllocator::get_base_address() {
    BEGIN_FUNC;
    END_FUNC;
    return master_base;
}


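// Open the pmem device, query its total size, and map the whole heap into this
// process. Must be called with `lock` held. On success master_fd and
// master_base are set; on open/mmap failure a negative errno is returned.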
int PmemUserspaceAllocator::init_pmem_area_locked()
{
    BEGIN_FUNC;
    int err = 0;
    int fd = deps.open(pmemdev, O_RDWR, 0);
    if (fd >= 0) {
        size_t size = 0;
        err = deps.getPmemTotalSize(fd, &size);
        if (err < 0) {
            ALOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", pmemdev,
                    err);
            size = 8<<20;   // 8 MiB
        }
        allocator.setSize(size);

        void* base = deps.mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
                0);
        if (base == MAP_FAILED) {
            ALOGE("%s: failed to map pmem master fd: %s", pmemdev,
                    strerror(deps.getErrno()));
            err = -deps.getErrno();
            base = 0;
            deps.close(fd);
            fd = -1;
        } else {
            master_fd = fd;
            master_base = base;
        }
    } else {
        ALOGE("%s: failed to open pmem device: %s", pmemdev,
                strerror(deps.getErrno()));
        err = -deps.getErrno();
    }
    END_FUNC;
    return err;
}


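// Thread-safe, one-time initialization of the master pmem area. The first
// caller does the real work; a failure (negative errno) is cached in master_fd
// so later callers see the same result without retrying.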
int PmemUserspaceAllocator::init_pmem_area()
{
    BEGIN_FUNC;
    pthread_mutex_lock(&lock);
    int err = master_fd;
    if (err == MASTER_FD_INIT) {
        // first time, try to initialize pmem
        err = init_pmem_area_locked();
        if (err) {
            ALOGE("%s: failed to initialize pmem area", pmemdev);
            master_fd = err;
        }
    } else if (err < 0) {
        // pmem couldn't be initialized, never use it
    } else {
        // pmem OK
        err = 0;
    }
    pthread_mutex_unlock(&lock);
    END_FUNC;
    return err;
}


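// Allocate `size` bytes out of the master heap: reserve a range in the
// userspace allocator, open a fresh fd on the pmem device, connect it to the
// master fd, and map the sub-range so the client process can use it. On
// success returns 0 and fills *pBase/*pOffset/*pFd; on error the reservation
// is rolled back.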
int PmemUserspaceAllocator::alloc_pmem_buffer(size_t size, int usage,
        void** pBase, int* pOffset, int* pFd)
{
    BEGIN_FUNC;
    int err = init_pmem_area();
    if (err == 0) {
        void* base = master_base;
        int offset = allocator.allocate(size);
        if (offset < 0) {
            // no more pmem memory
            ALOGE("%s: no more pmem available", pmemdev);
            err = -ENOMEM;
        } else {
            int openFlags = get_open_flags(usage);

            //ALOGD("%s: allocating pmem at offset 0x%p", pmemdev, offset);

            // now create the "sub-heap"
            int fd = deps.open(pmemdev, openFlags, 0);
            err = fd < 0 ? fd : 0;

            // and connect to it
            if (err == 0)
                err = deps.connectPmem(fd, master_fd);

            // and make it available to the client process
            if (err == 0)
                err = deps.mapPmem(fd, offset, size);

            if (err < 0) {
                ALOGE("%s: failed to initialize pmem sub-heap: %d", pmemdev,
                        err);
                err = -deps.getErrno();
                deps.close(fd);
                allocator.deallocate(offset);
                fd = -1;
            } else {
                ALOGV("%s: mapped fd %d at offset %d, size %d", pmemdev, fd, offset, size);
                memset((char*)base + offset, 0, size);
                *pBase = base;
                *pOffset = offset;
                *pFd = fd;
            }
            //ALOGD_IF(!err, "%s: allocating pmem size=%d, offset=%d", pmemdev, size, offset);
        }
    }
    END_FUNC;
    return err;
}


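// Release a buffer previously handed out by alloc_pmem_buffer: unmap the
// sub-range from the client and, only if the unmap succeeded, return the range
// to the userspace allocator.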
int PmemUserspaceAllocator::free_pmem_buffer(size_t size, void* base, int offset, int fd)
{
    BEGIN_FUNC;
    int err = 0;
    if (fd >= 0) {
        err = deps.unmapPmem(fd, offset, size);
        ALOGE_IF(err<0, "PMEM_UNMAP failed (%s), fd=%d, sub.offset=%u, "
                "sub.size=%u", strerror(deps.getErrno()), fd, offset, size);
        if (err == 0) {
            // we can't deallocate the memory in case of UNMAP failure
            // because it would give that process access to someone else's
            // surfaces, which would be a security breach.
            allocator.deallocate(offset);
        }
    }
    END_FUNC;
    return err;
}

PmemUserspaceAllocator::Deps::Allocator::~Allocator()
{
    BEGIN_FUNC;
    END_FUNC;
}

PmemUserspaceAllocator::Deps::~Deps()
{
    BEGIN_FUNC;
    END_FUNC;
}

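// PmemKernelAllocator leaves the bookkeeping to the kernel driver instead:
// every allocation opens its own fd on the pmem device and maps a standalone
// region, so there is no shared master heap and no offset management here.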
PmemKernelAllocator::PmemKernelAllocator(Deps& deps, const char* pmemdev):
    deps(deps),
    pmemdev(pmemdev)
{
    BEGIN_FUNC;
    END_FUNC;
}


PmemKernelAllocator::~PmemKernelAllocator()
{
    BEGIN_FUNC;
    END_FUNC;
}


void* PmemKernelAllocator::get_base_address() {
    BEGIN_FUNC;
    END_FUNC;
    return 0;
}


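// Round x up to the next power of two using the classic bit-smearing trick;
// a value that is already a power of two is returned unchanged.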
static unsigned clp2(unsigned x) {
    x = x - 1;
    x = x | (x >> 1);
    x = x | (x >> 2);
    x = x | (x >> 4);
    x = x | (x >> 8);
    x = x | (x >>16);
    return x + 1;
}


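// Allocate a standalone pmem buffer: open the device, round the (already
// page-aligned) size up to a power of two, map the region into this process
// and zero it. The offset is always 0 because each buffer has its own fd.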
int PmemKernelAllocator::alloc_pmem_buffer(size_t size, int usage,
        void** pBase, int* pOffset, int* pFd)
{
    BEGIN_FUNC;

    *pBase = 0;
    *pOffset = 0;
    *pFd = -1;

    int err;
    int openFlags = get_open_flags(usage);
    int fd = deps.open(pmemdev, openFlags, 0);
    if (fd < 0) {
        err = -deps.getErrno();
        END_FUNC;
        return err;
    }

    // The size should already be page aligned, now round it up to a power of 2.
    size = clp2(size);

    void* base = deps.mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED) {
        ALOGE("%s: failed to map pmem fd: %s", pmemdev,
             strerror(deps.getErrno()));
        err = -deps.getErrno();
        deps.close(fd);
        END_FUNC;
        return err;
    }

    memset(base, 0, size);

    *pBase = base;
    *pOffset = 0;
    *pFd = fd;

    END_FUNC;
    return 0;
}


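// Free a buffer returned by alloc_pmem_buffer above: recompute the
// power-of-two size that was actually mapped and munmap it. The fd itself is
// not closed here.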
int PmemKernelAllocator::free_pmem_buffer(size_t size, void* base, int offset, int fd)
{
    BEGIN_FUNC;
    // The size should already be page aligned, now round it up to a power of 2
    // like we did when allocating.
    size = clp2(size);

    int err = deps.munmap(base, size);
    if (err < 0) {
        err = deps.getErrno();
        ALOGW("%s: error unmapping pmem fd: %s", pmemdev, strerror(err));
        END_FUNC;
        return -err;
    }
    END_FUNC;
    return 0;
}

PmemKernelAllocator::Deps::~Deps()
{
    BEGIN_FUNC;
    END_FUNC;
}