/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ReliableSurface.h"

#include <unistd.h>  // for close()

#include <log/log_main.h>
#include <private/android/AHardwareBufferHelpers.h>
// TODO: this should be including apex instead.
#include <system/window.h>
#include <vndk/window.h>

namespace android::uirenderer::renderthread {

// TODO: Re-enable after addressing more of the TODOs.
// With this disabled we won't have a good up-front signal that the surface is no longer valid;
// however, we can at least handle that reactively post-draw. There's just not a good mechanism
// to propagate this error back to the caller.
constexpr bool DISABLE_BUFFER_PREFETCH = true;

ReliableSurface::ReliableSurface(ANativeWindow* window) : mWindow(window) {
    LOG_ALWAYS_FATAL_IF(!mWindow, "Error, unable to wrap a nullptr");
    ANativeWindow_acquire(mWindow);
}

ReliableSurface::~ReliableSurface() {
    clearReservedBuffer();
    // Clear out the interceptors for proper hygiene.
    // As a concrete example, if the underlying ANativeWindow is associated with
    // an EGLSurface that is still in use, then if we don't clear out the
    // interceptors we walk into undefined behavior.
    ANativeWindow_setCancelBufferInterceptor(mWindow, nullptr, nullptr);
    ANativeWindow_setDequeueBufferInterceptor(mWindow, nullptr, nullptr);
    ANativeWindow_setQueueBufferInterceptor(mWindow, nullptr, nullptr);
    ANativeWindow_setPerformInterceptor(mWindow, nullptr, nullptr);
    ANativeWindow_setQueryInterceptor(mWindow, nullptr, nullptr);
    ANativeWindow_release(mWindow);
}

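// Illustrative lifecycle sketch (not part of this file; the real driver is HWUI's render
// pipeline, and the ownership/refcounting details shown here are assumptions):
//
//     ReliableSurface surface{window};   // wraps and acquires the ANativeWindow
//     surface.init();                    // installs the interceptors below
//     surface.reserveNext();             // optional pre-dequeue (currently compiled out)
//     // ... render through the wrapped window via EGL or Vulkan ...
//     // destruction removes the interceptors and releases the window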
void ReliableSurface::init() {
    int result = ANativeWindow_setCancelBufferInterceptor(mWindow, hook_cancelBuffer, this);
    LOG_ALWAYS_FATAL_IF(result != NO_ERROR, "Failed to set cancelBuffer interceptor: error = %d",
                        result);

    result = ANativeWindow_setDequeueBufferInterceptor(mWindow, hook_dequeueBuffer, this);
    LOG_ALWAYS_FATAL_IF(result != NO_ERROR, "Failed to set dequeueBuffer interceptor: error = %d",
                        result);

    result = ANativeWindow_setQueueBufferInterceptor(mWindow, hook_queueBuffer, this);
    LOG_ALWAYS_FATAL_IF(result != NO_ERROR, "Failed to set queueBuffer interceptor: error = %d",
                        result);

    result = ANativeWindow_setPerformInterceptor(mWindow, hook_perform, this);
    LOG_ALWAYS_FATAL_IF(result != NO_ERROR, "Failed to set perform interceptor: error = %d",
                        result);

    result = ANativeWindow_setQueryInterceptor(mWindow, hook_query, this);
    LOG_ALWAYS_FATAL_IF(result != NO_ERROR, "Failed to set query interceptor: error = %d",
                        result);
}

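// reserveNext() attempts to dequeue a buffer ahead of time so that a dead buffer queue is
// detected before drawing starts. The reserved buffer and fence are stashed in mReservedBuffer
// and mReservedFenceFd and handed back by hook_dequeueBuffer on the next dequeue request, so
// the renderer receives the buffer that was already pulled from the queue. With
// DISABLE_BUFFER_PREFETCH set, this is currently a no-op that just reports OK.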
int ReliableSurface::reserveNext() {
    if constexpr (DISABLE_BUFFER_PREFETCH) {
        return OK;
    }
    {
        std::lock_guard _lock{mMutex};
        if (mReservedBuffer) {
            ALOGW("reserveNext called but there was already a buffer reserved?");
            return OK;
        }
        if (mBufferQueueState != OK) {
            return UNKNOWN_ERROR;
        }
        if (mHasDequeuedBuffer) {
            return OK;
        }
    }

    // TODO: Update this to better handle when the requested dimensions have changed.
    // Currently the driver does this via query + perform, but that's after we've already
    // reserved a buffer. Should we do that logic here instead? Or should we drop the backing
    // Surface entirely and go fully manual on the IGraphicBufferProducer instead?

    int fenceFd = -1;
    ANativeWindowBuffer* buffer = nullptr;

    // Note that this calls back into our own hooked method.
    int result = ANativeWindow_dequeueBuffer(mWindow, &buffer, &fenceFd);

    {
        std::lock_guard _lock{mMutex};
        LOG_ALWAYS_FATAL_IF(mReservedBuffer, "race condition in reserveNext");
        mReservedBuffer = buffer;
        mReservedFenceFd.reset(fenceFd);
    }

    return result;
}

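// clearReservedBuffer() returns an unused reserved buffer to the queue. It is called from the
// destructor and from the cancel/queue/perform hooks, since queueing, cancelling, or
// reconfiguring the window invalidates whatever was reserved.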
void ReliableSurface::clearReservedBuffer() {
    ANativeWindowBuffer* buffer = nullptr;
    int releaseFd = -1;
    {
        std::lock_guard _lock{mMutex};
        if (mReservedBuffer) {
            ALOGW("Reserved buffer %p was never used", mReservedBuffer);
            buffer = mReservedBuffer;
            releaseFd = mReservedFenceFd.release();
        }
        mReservedBuffer = nullptr;
        mReservedFenceFd.reset();
        mHasDequeuedBuffer = false;
    }
    if (buffer) {
        // Note that ANativeWindow_cancelBuffer re-enters clearReservedBuffer via
        // hook_cancelBuffer, so mReservedBuffer must already be cleared by this point to
        // avoid recursing forever.
        ANativeWindow_cancelBuffer(mWindow, buffer, releaseFd);
    }
}

bool ReliableSurface::isFallbackBuffer(const ANativeWindowBuffer* windowBuffer) const {
    if (!mScratchBuffer || !windowBuffer) {
        return false;
    }
    ANativeWindowBuffer* scratchBuffer =
            AHardwareBuffer_to_ANativeWindowBuffer(mScratchBuffer.get());
    return windowBuffer == scratchBuffer;
}

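// When the real dequeue fails, hand out a 1x1 scratch AHardwareBuffer allocated with the
// last-known usage/format so rendering can proceed instead of crashing. The frame drawn into
// it is effectively dropped, because hook_queueBuffer and hook_cancelBuffer swallow the
// fallback buffer rather than returning it to the queue. The error that triggered the
// fallback is remembered in mBufferQueueState.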
ANativeWindowBuffer* ReliableSurface::acquireFallbackBuffer(int error) {
    std::lock_guard _lock{mMutex};
    mBufferQueueState = error;

    if (mScratchBuffer) {
        return AHardwareBuffer_to_ANativeWindowBuffer(mScratchBuffer.get());
    }

    AHardwareBuffer_Desc desc = AHardwareBuffer_Desc{
            .usage = mUsage,
            .format = mFormat,
            .width = 1,
            .height = 1,
            .layers = 1,
            .rfu0 = 0,
            .rfu1 = 0,
    };

    AHardwareBuffer* newBuffer;
    int result = AHardwareBuffer_allocate(&desc, &newBuffer);

    if (result != NO_ERROR) {
        // Allocation failed, so there's no fallback to offer.
        ALOGW("Failed to allocate scratch buffer, error=%d", result);
        return nullptr;
    }

    mScratchBuffer.reset(newBuffer);
    return AHardwareBuffer_to_ANativeWindowBuffer(newBuffer);
}

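// Dequeue interceptor: first consume any buffer reserved by reserveNext(); otherwise forward
// to the real dequeueBuffer and, on failure, substitute the fallback buffer so the caller can
// keep rendering while the error is handled post-draw.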
int ReliableSurface::hook_dequeueBuffer(ANativeWindow* window,
                                        ANativeWindow_dequeueBufferFn dequeueBuffer, void* data,
                                        ANativeWindowBuffer** buffer, int* fenceFd) {
    ReliableSurface* rs = reinterpret_cast<ReliableSurface*>(data);
    {
        std::lock_guard _lock{rs->mMutex};
        if (rs->mReservedBuffer) {
            *buffer = rs->mReservedBuffer;
            *fenceFd = rs->mReservedFenceFd.release();
            rs->mReservedBuffer = nullptr;
            return OK;
        }
    }

    int result = dequeueBuffer(window, buffer, fenceFd);
    if (result != OK) {
        ALOGW("dequeueBuffer failed, error = %d; switching to fallback", result);
        *buffer = rs->acquireFallbackBuffer(result);
        *fenceFd = -1;
        return *buffer ? OK : INVALID_OPERATION;
    } else {
        std::lock_guard _lock{rs->mMutex};
        rs->mHasDequeuedBuffer = true;
    }
    return OK;
}

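// Cancel interceptor: the fallback buffer never came from the real queue, so cancels of it are
// swallowed (closing any fence fd); everything else is forwarded.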
int ReliableSurface::hook_cancelBuffer(ANativeWindow* window,
                                       ANativeWindow_cancelBufferFn cancelBuffer, void* data,
                                       ANativeWindowBuffer* buffer, int fenceFd) {
    ReliableSurface* rs = reinterpret_cast<ReliableSurface*>(data);
    rs->clearReservedBuffer();
    if (rs->isFallbackBuffer(buffer)) {
        if (fenceFd > 0) {
            close(fenceFd);
        }
        return OK;
    }
    return cancelBuffer(window, buffer, fenceFd);
}

int ReliableSurface::hook_queueBuffer(ANativeWindow* window,
                                      ANativeWindow_queueBufferFn queueBuffer, void* data,
                                      ANativeWindowBuffer* buffer, int fenceFd) {
    ReliableSurface* rs = reinterpret_cast<ReliableSurface*>(data);
    rs->clearReservedBuffer();

    if (rs->isFallbackBuffer(buffer)) {
        if (fenceFd > 0) {
            close(fenceFd);
        }
        return OK;
    }

    return queueBuffer(window, buffer, fenceFd);
}

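// Perform interceptor: forward the call, then mirror any usage/format changes into mUsage and
// mFormat so that a later fallback buffer matches the current window configuration, and note
// whether the driver requested at least the expected buffer count. Illustrative sketch of the
// va_arg decoding (the exact caller is an assumption, not something this file controls):
//
//     // A driver call such as
//     //     window->perform(window, ANATIVEWINDOW_PERFORM_SET_BUFFERS_FORMAT, format);
//     // lands here, and the switch below records `format` for acquireFallbackBuffer().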
int ReliableSurface::hook_perform(ANativeWindow* window, ANativeWindow_performFn perform,
                                  void* data, int operation, va_list args) {
    // Drop the reserved buffer, if there is one, since this call probably mutated the buffer
    // dimensions.
    // TODO: Filter to only the operations that affect the reserved buffer.
    // TODO: Can we mutate the reserved buffer in some cases?
    ReliableSurface* rs = reinterpret_cast<ReliableSurface*>(data);
    rs->clearReservedBuffer();

    va_list argsCopy;
    va_copy(argsCopy, args);
    int result = perform(window, operation, argsCopy);
    va_end(argsCopy);

    {
        std::lock_guard _lock{rs->mMutex};

        switch (operation) {
            case ANATIVEWINDOW_PERFORM_SET_USAGE:
                rs->mUsage = va_arg(args, uint32_t);
                break;
            case ANATIVEWINDOW_PERFORM_SET_USAGE64:
                rs->mUsage = va_arg(args, uint64_t);
                break;
            case ANATIVEWINDOW_PERFORM_SET_BUFFERS_GEOMETRY:
                /* width */ va_arg(args, uint32_t);
                /* height */ va_arg(args, uint32_t);
                rs->mFormat = static_cast<AHardwareBuffer_Format>(va_arg(args, int32_t));
                break;
            case ANATIVEWINDOW_PERFORM_SET_BUFFERS_FORMAT:
                rs->mFormat = static_cast<AHardwareBuffer_Format>(va_arg(args, int32_t));
                break;
            case NATIVE_WINDOW_SET_BUFFER_COUNT: {
                size_t bufferCount = va_arg(args, size_t);
                if (bufferCount >= rs->mExpectedBufferCount) {
                    rs->mDidSetExtraBuffers = true;
                } else {
                    ALOGD("HOOK FAILED! Expected %zu, got %zu", rs->mExpectedBufferCount,
                          bufferCount);
                }
                break;
            }
        }
    }
    return result;
}

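// Query interceptor: watch for MIN_UNDEQUEUED_BUFFERS results and derive the buffer count we
// expect the driver to set afterwards. The "+ 2" presumably covers the buffer currently being
// drawn plus one extra for pipelining; that rationale is an inference, not stated in this file.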
int ReliableSurface::hook_query(const ANativeWindow* window, ANativeWindow_queryFn query,
                                void* data, int what, int* value) {
    ReliableSurface* rs = reinterpret_cast<ReliableSurface*>(data);
    int result = query(window, what, value);
    if (what == ANATIVEWINDOW_QUERY_MIN_UNDEQUEUED_BUFFERS && result == OK) {
        std::lock_guard _lock{rs->mMutex};
        rs->mExpectedBufferCount = *value + 2;
    }
    return result;
}

}  // namespace android::uirenderer::renderthread