/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef LOG_TAG
#undef LOG_TAG
#endif
#define LOG_TAG "VSoCGrallocRegionRegistry"

#include <limits.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <cutils/hashmap.h>
#include <log/log.h>
#include <cutils/atomic.h>

#include <linux/ashmem.h>

#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>

#include "gralloc_vsoc_priv.h"

#include <deque>
#include <map>
#include <mutex>

// TODO(ghartman): Make this configurable through a property
static const bool g_log_refs = false;

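// Tracks one mapped gralloc buffer region: its mapped base address and the
// number of outstanding references to it.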
struct GrallocRegion {
  void* base_;
  int   num_references_;

  GrallocRegion() : base_(0), num_references_(0) { }
  // Copy constructors are ok.
};


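// Copies the ashmem name of the buffer backing hnd into output via
// ASHMEM_GET_NAME. Returns an empty string if the handle or its fd is
// invalid, or if the ioctl fails.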
static const char* get_buffer_name(
    const private_handle_t* hnd, char output[ASHMEM_NAME_LEN]) {
  output[0] = '\0';
  if (!hnd) {
    ALOGE("Attempted to log gralloc name hnd=NULL");
    return output;
  }
  if (hnd->fd == -1) {
    ALOGE("Attempted to log gralloc name hnd=%p with fd == -1", hnd);
    return output;
  }
  int rval = ioctl(hnd->fd, ASHMEM_GET_NAME, output);
  if (rval == -1) {
    output[0] = '\0';
  }
  return output;
}


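// cutils Hashmap callbacks: regions are keyed by their NUL-terminated
// ashmem name.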
static int str_hash(void* str) {
  return hashmapHash(str, strlen(reinterpret_cast<const char*>(str)));
}


static bool str_equal(void* a, void* b) {
  return strcmp(
      reinterpret_cast<const char*>(a),
      reinterpret_cast<const char*>(b)) == 0;
}


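// Returns the process-wide Hashmap from ashmem region name to GrallocRegion*,
// created lazily on first use.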
static Hashmap* get_regions() {
  static Hashmap* regionMap = hashmapCreate(19, str_hash, str_equal);
  return regionMap;
}


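// Looks up (creating if needed) the GrallocRegion keyed by hnd's ashmem name.
// Returns with the region map locked; callers must release it with
// unlock_region().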
static GrallocRegion* lock_region_for_handle(
    const private_handle_t* hnd, char region_name[ASHMEM_NAME_LEN]) {
  region_name[0] = '\0';
  get_buffer_name(hnd, region_name);
  Hashmap* hash = get_regions();
  hashmapLock(hash);
  GrallocRegion* region = reinterpret_cast<GrallocRegion*>(
      hashmapGet(hash, region_name));
  if (!region) {
    region = new GrallocRegion;
    hashmapPut(hash, strdup(region_name), region);
  }
  return region;
}


/* The current implementation uses only a single lock for all regions.
 * This method takes a region to simplify the refactoring if we go to
 * finer-grained locks.
 */
static inline void unlock_region(GrallocRegion*) {
  hashmapUnlock(get_regions());
}


/*
 * surface_flinger can drop its last reference to a gralloc buffer (from the
 * gralloc HAL's point of view) even though it also has work in flight to the
 * GPU for that target. This causes segfaults in the swiftshader code.
 *
 * We create a compromise solution. On unmap we release the pages by mmapping
 * anonymous memory over the range, but we don't release the address space.
 * Instead we mark the address space for recycling into a new gralloc buffer.
 * This means that the shaders can still write, that the writes won't land in
 * the gralloc buffer, and the gralloc buffer memory can be released.
 *
 * When we're preparing to mmap a new gralloc buffer we see if we can recycle
 * address space from a prior gralloc buffer.
 *
 * This protects the application layer from stray memory writes and pointer
 * references to freed memory. It does mean that bad pixel data can land in
 * a buffer in the case of a fast map-unmap-map sequence. However, that
 * could also happen on a physical GPU.
 *
 * The alternative to this would be to create an elaborate reference counting
 * mechanism below both gralloc and SwiftShader. However, we want to keep the
 * SwiftShader code clean, so that seems undesirable.
 *
 * This problem also comes up for physical GPUs b/62267886. Background for
 * this solution is in b/118777601.
 */

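// Address ranges released by recycle_munmap(), keyed by mapping length,
// waiting to be reused by recycle_mmap().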
static std::map<size_t, std::deque<void*>> g_recycled_addrs;
std::mutex g_recycled_addrs_mutex;


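// mmap() wrapper: when the caller does not request an address, try to reuse
// a previously recycled range of the same length (mapped with MAP_FIXED)
// before letting the kernel pick one.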
static void* recycle_mmap(void *addr, size_t length, int prot, int flags,
                          int fd, off_t offset) {
  if (!addr) {
    std::lock_guard<std::mutex> guard(g_recycled_addrs_mutex);
    auto it = g_recycled_addrs.find(length);
    if (it != g_recycled_addrs.end()) {
      if (it->second.size()) {
        addr = it->second.front();
        flags |= MAP_FIXED;
        it->second.pop_front();
      }
    }
  }
  return mmap(addr, length, prot, flags, fd, offset);
}


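// munmap() replacement: releases the backing pages by mapping anonymous
// memory over the range (keeping the address space reserved), then records
// the range for reuse by recycle_mmap().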
static int recycle_munmap(void *addr, size_t length) {
  // Do this first so we don't hold the mutex during the syscall
  if (addr != mmap(addr, length, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0)) {
    // Be conservative. Don't recycle here.
    return -1;
  }
  std::lock_guard<std::mutex> guard(g_recycled_addrs_mutex);
  g_recycled_addrs[length].push_back(addr);
  return 0;
}


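// Maps the region backing hnd on its first reference (installing a PROT_NONE
// guard page over the last page of the mapping) and increments its reference
// count. Returns the mapped base address, or NULL on failure.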
void* reference_region(const char* op, const private_handle_t* hnd) {
  char name_buf[ASHMEM_NAME_LEN];
  GrallocRegion* region = lock_region_for_handle(hnd, name_buf);
  if (!region->base_) {
    void* mappedAddress = recycle_mmap(
        0, hnd->total_size, PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fd, 0);
    if (mappedAddress == MAP_FAILED) {
      ALOGE("Could not mmap %s", strerror(errno));
      unlock_region(region);
      return NULL;
    }
    // Set up the guard pages. The last page is always a guard
    uintptr_t base = uintptr_t(mappedAddress);
    uintptr_t addr = base + hnd->total_size - PAGE_SIZE;
    if (mprotect((void*)addr, PAGE_SIZE, PROT_NONE) == -1) {
      ALOGE("mprotect base=%p, pg=%p failed (%s)", (void*)base, (void*)addr,
            strerror(errno));
    }
    region->base_ = mappedAddress;
    ALOGI("Mapped %s hnd=%p fd=%d base=%p format=%s(0x%x) width=%d height=%d",
          name_buf, hnd, hnd->fd, region->base_,
          pixel_format_to_string(hnd->format), hnd->format,
          hnd->x_res, hnd->y_res);
  }

  void* rval = region->base_;
  ++region->num_references_;
  ALOGI_IF(g_log_refs, "Referencing name=%s op=%s addr=%p new numRefs=%d",
        name_buf, op, region->base_, region->num_references_);
  unlock_region(region);
  return rval;
}


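// Drops one reference to hnd's region; when the count reaches zero the
// mapping is handed back to the address recycler. Returns 0 on success,
// -1 on errors (no mapping or reference underflow).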
int unreference_region(const char* op, const private_handle_t* hnd) {
  char name_buf[ASHMEM_NAME_LEN];

  GrallocRegion* region = lock_region_for_handle(hnd, name_buf);
  if (!region->base_) {
    ALOGE("Unmapping region with no map hnd=%p", hnd);
    unlock_region(region);
    return -1;
  }
  if (region->num_references_ < 1) {
    ALOGE(
        "unmap with hnd=%p, numReferences=%d", hnd, region->num_references_);
    unlock_region(region);
    return -1;
  }
  --region->num_references_;
  if (!region->num_references_) {
    ALOGI("Unmapped %s hnd=%p fd=%d base=%p", name_buf, hnd,
          hnd->fd, region->base_);
    if (recycle_munmap(region->base_, hnd->total_size) < 0) {
      ALOGE("Could not unmap %s", strerror(errno));
    }
    region->base_ = 0;
  }
  ALOGI_IF(g_log_refs, "Unreferencing name=%s op=%s addr=%p new numRefs=%d",
        name_buf, op, region->base_, region->num_references_);
  unlock_region(region);
  return 0;
}