1 /*
2  * Copyright (C) 2013-2018 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "Camera3-OutputStream"
18 #define ATRACE_TAG ATRACE_TAG_CAMERA
19 //#define LOG_NDEBUG 0
20 
21 #include <algorithm>
22 #include <ctime>
23 #include <fstream>
24 
25 #include <aidl/android/hardware/camera/device/CameraBlob.h>
26 #include <aidl/android/hardware/camera/device/CameraBlobId.h>
27 
28 #include <android-base/unique_fd.h>
29 #include <cutils/properties.h>
30 #include <ui/GraphicBuffer.h>
31 #include <utils/Log.h>
32 #include <utils/Trace.h>
33 
34 #include <common/CameraDeviceBase.h>
35 #include "api1/client2/JpegProcessor.h"
36 #include "Camera3OutputStream.h"
37 #include "utils/TraceHFR.h"
38 
39 #ifndef container_of
40 #define container_of(ptr, type, member) \
41     (type *)((char*)(ptr) - offsetof(type, member))
42 #endif
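// container_of recovers a pointer to an enclosing struct from a pointer to one of its members;
// returnBufferCheckedLocked() below uses it to map the buffer_handle_t* stored in a
// camera_stream_buffer back to the ANativeWindowBuffer that owns it.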
43 
44 namespace android {
45 
46 namespace camera3 {
47 
48 using aidl::android::hardware::camera::device::CameraBlob;
49 using aidl::android::hardware::camera::device::CameraBlobId;
50 
51 Camera3OutputStream::Camera3OutputStream(int id,
52         sp<Surface> consumer,
53         uint32_t width, uint32_t height, int format,
54         android_dataspace dataSpace, camera_stream_rotation_t rotation,
55         nsecs_t timestampOffset, const String8& physicalCameraId,
56         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
57         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
58         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
59         int mirrorMode) :
60         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
61                             /*maxSize*/0, format, dataSpace, rotation,
62                             physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
63                             dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
64                             timestampBase),
65         mConsumer(consumer),
66         mTransform(0),
67         mTraceFirstBuffer(true),
68         mUseBufferManager(false),
69         mTimestampOffset(timestampOffset),
70         mUseReadoutTime(false),
71         mConsumerUsage(0),
72         mDropBuffers(false),
73         mMirrorMode(mirrorMode),
74         mDequeueBufferLatency(kDequeueLatencyBinSize),
75         mIPCTransport(transport) {
76 
77     if (mConsumer == NULL) {
78         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
79         mState = STATE_ERROR;
80     }
81 
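    // needsReleaseNotify is true only for streams that belong to a valid stream set; for those,
    // BufferProducerListener::onBufferReleased() (below) forwards buffer-release events to the
    // Camera3BufferManager that may manage the set.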
82     bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
83     mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
84 }
85 
86 Camera3OutputStream::Camera3OutputStream(int id,
87         sp<Surface> consumer,
88         uint32_t width, uint32_t height, size_t maxSize, int format,
89         android_dataspace dataSpace, camera_stream_rotation_t rotation,
90         nsecs_t timestampOffset, const String8& physicalCameraId,
91         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
92         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
93         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
94         int mirrorMode) :
95         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
96                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
97                             setId, isMultiResolution, dynamicRangeProfile, streamUseCase,
98                             deviceTimeBaseIsRealtime, timestampBase),
99         mConsumer(consumer),
100         mTransform(0),
101         mTraceFirstBuffer(true),
102         mUseBufferManager(false),
103         mTimestampOffset(timestampOffset),
104         mUseReadoutTime(false),
105         mConsumerUsage(0),
106         mDropBuffers(false),
107         mMirrorMode(mirrorMode),
108         mDequeueBufferLatency(kDequeueLatencyBinSize),
109         mIPCTransport(transport) {
110 
111     if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
112         ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
113                 format);
114         mState = STATE_ERROR;
115     }
116 
117     if (mConsumer == NULL) {
118         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
119         mState = STATE_ERROR;
120     }
121 
122     bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
123     mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
124 }
125 
126 Camera3OutputStream::Camera3OutputStream(int id,
127         uint32_t width, uint32_t height, int format,
128         uint64_t consumerUsage, android_dataspace dataSpace,
129         camera_stream_rotation_t rotation, nsecs_t timestampOffset,
130         const String8& physicalCameraId,
131         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
132         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
133         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
134         int mirrorMode) :
135         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
136                             /*maxSize*/0, format, dataSpace, rotation,
137                             physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
138                             dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
139                             timestampBase),
140         mConsumer(nullptr),
141         mTransform(0),
142         mTraceFirstBuffer(true),
143         mUseBufferManager(false),
144         mTimestampOffset(timestampOffset),
145         mUseReadoutTime(false),
146         mConsumerUsage(consumerUsage),
147         mDropBuffers(false),
148         mMirrorMode(mirrorMode),
149         mDequeueBufferLatency(kDequeueLatencyBinSize),
150         mIPCTransport(transport) {
151     // Deferred consumer only supports the preview surface format for now.
152     if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
153         ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
154                 __FUNCTION__);
155         mState = STATE_ERROR;
156     }
157 
158     // Validation check for the consumer usage flag.
159     if ((consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) == 0 &&
160             (consumerUsage & GraphicBuffer::USAGE_HW_COMPOSER) == 0) {
161         ALOGE("%s: Deferred consumer usage flag is illegal %" PRIu64 "!",
162               __FUNCTION__, consumerUsage);
163         mState = STATE_ERROR;
164     }
165 
166     mConsumerName = String8("Deferred");
167     bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
168     mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
169 }
170 
171 Camera3OutputStream::Camera3OutputStream(int id, camera_stream_type_t type,
172                                          uint32_t width, uint32_t height,
173                                          int format,
174                                          android_dataspace dataSpace,
175                                          camera_stream_rotation_t rotation,
176                                          const String8& physicalCameraId,
177                                          const std::unordered_set<int32_t> &sensorPixelModesUsed,
178                                          IPCTransport transport,
179                                          uint64_t consumerUsage, nsecs_t timestampOffset,
180                                          int setId, bool isMultiResolution,
181                                          int64_t dynamicRangeProfile, int64_t streamUseCase,
182                                          bool deviceTimeBaseIsRealtime, int timestampBase,
183                                          int mirrorMode) :
184         Camera3IOStreamBase(id, type, width, height,
185                             /*maxSize*/0,
186                             format, dataSpace, rotation,
187                             physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
188                             dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
189                             timestampBase),
190         mTransform(0),
191         mTraceFirstBuffer(true),
192         mUseBufferManager(false),
193         mTimestampOffset(timestampOffset),
194         mUseReadoutTime(false),
195         mConsumerUsage(consumerUsage),
196         mDropBuffers(false),
197         mMirrorMode(mirrorMode),
198         mDequeueBufferLatency(kDequeueLatencyBinSize),
199         mIPCTransport(transport) {
200 
201     bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
202     mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
203 
204     // Subclasses are expected to initialize mConsumer themselves
205 }
206 
207 
208 Camera3OutputStream::~Camera3OutputStream() {
209     disconnectLocked();
210 }
211 
212 status_t Camera3OutputStream::getBufferLocked(camera_stream_buffer *buffer,
213         const std::vector<size_t>&) {
214     ATRACE_HFR_CALL();
215 
216     ANativeWindowBuffer* anb;
217     int fenceFd = -1;
218 
219     status_t res;
220     res = getBufferLockedCommon(&anb, &fenceFd);
221     if (res != OK) {
222         return res;
223     }
224 
225     /**
226      * FenceFD now owned by HAL except in case of error,
227      * in which case we reassign it to acquire_fence
228      */
229     handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
230                         /*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);
231 
232     return OK;
233 }
234 
235 status_t Camera3OutputStream::getBuffersLocked(std::vector<OutstandingBuffer>* outBuffers) {
236     status_t res;
237 
238     if ((res = getBufferPreconditionCheckLocked()) != OK) {
239         return res;
240     }
241 
242     if (mUseBufferManager) {
243         ALOGE("%s: stream %d is managed by buffer manager and does not support batch operation",
244                 __FUNCTION__, mId);
245         return INVALID_OPERATION;
246     }
247 
248     sp<Surface> consumer = mConsumer;
249     /**
250      * Release the lock briefly to avoid deadlock in the following scenario:
251      * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
252      * This thread holds the StreamingProcessor lock and tries to take the Camera3Stream lock.
253      * Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
254      * This thread holds the Camera3Stream lock and the bufferQueue lock, and tries to take
255      * the StreamingProcessor lock.
256      * Thread 3: Camera3Stream::getBuffer(). This thread holds the Camera3Stream lock and
257      * tries to take the bufferQueue lock.
258      * The result is a circular locking dependency.
259      */
260     mLock.unlock();
261 
262     size_t numBuffersRequested = outBuffers->size();
263     std::vector<Surface::BatchBuffer> buffers(numBuffersRequested);
264 
265     nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
266     res = consumer->dequeueBuffers(&buffers);
267     nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
268     mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
269 
270     mLock.lock();
271 
272     if (res != OK) {
273         if (shouldLogError(res, mState)) {
274             ALOGE("%s: Stream %d: Can't dequeue %zu output buffers: %s (%d)",
275                     __FUNCTION__, mId, numBuffersRequested, strerror(-res), res);
276         }
277         checkRetAndSetAbandonedLocked(res);
278         return res;
279     }
280     checkRemovedBuffersLocked();
281 
282     /**
283      * FenceFD now owned by HAL except in case of error,
284      * in which case we reassign it to acquire_fence
285      */
286     for (size_t i = 0; i < numBuffersRequested; i++) {
287         handoutBufferLocked(*(outBuffers->at(i).outBuffer),
288                 &(buffers[i].buffer->handle), /*acquireFence*/buffers[i].fenceFd,
289                 /*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);
290     }
291     return OK;
292 }
293 
294 status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
295             ANativeWindowBuffer* buffer, int anwReleaseFence,
296             const std::vector<size_t>&) {
297     return consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
298 }
299 
300 status_t Camera3OutputStream::returnBufferLocked(
301         const camera_stream_buffer &buffer,
302         nsecs_t timestamp, nsecs_t readoutTimestamp,
303         int32_t transform, const std::vector<size_t>& surface_ids) {
304     ATRACE_HFR_CALL();
305 
306     if (mHandoutTotalBufferCount == 1) {
307         returnPrefetchedBuffersLocked();
308     }
309 
310     status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
311                                          /*output*/true, transform, surface_ids);
312 
313     if (res != OK) {
314         return res;
315     }
316 
317     mLastTimestamp = timestamp;
318     mFrameCount++;
319 
320     return OK;
321 }
322 
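// fixUpHidlJpegBlobHeader() rewrites the transport header that a HIDL HAL appends at the end of a
// BLOB (JPEG) buffer: the camera_jpeg_blob_t located at getWidth() - sizeof(camera_jpeg_blob_t)
// (for BLOB buffers, getWidth() is the buffer size in bytes) is validated and re-emitted as an
// AIDL CameraBlob at getWidth() - sizeof(CameraBlob), since the two header structs differ in
// layout (see http://b/229688810, referenced in returnBufferCheckedLocked below).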
323 status_t Camera3OutputStream::fixUpHidlJpegBlobHeader(ANativeWindowBuffer* anwBuffer, int fence) {
324     // Lock the JPEG buffer for CPU read
325     sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
326     void* mapped = nullptr;
327     base::unique_fd fenceFd(dup(fence));
328     // Use USAGE_SW_WRITE_RARELY since we're going to re-write the CameraBlob
329     // header.
330     GraphicBufferLocker gbLocker(graphicBuffer);
331     status_t res =
332             gbLocker.lockAsync(
333                     GraphicBuffer::USAGE_SW_READ_OFTEN | GraphicBuffer::USAGE_SW_WRITE_RARELY,
334                     &mapped, fenceFd.get());
335     if (res != OK) {
336         ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
337         return res;
338     }
339 
340     uint8_t *hidlHeaderStart =
341             static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t);
342     // Check that the jpeg buffer is big enough to contain HIDL camera blob
343     if (hidlHeaderStart < static_cast<uint8_t *>(mapped)) {
344         ALOGE("%s, jpeg buffer not large enough to fit HIDL camera blob %" PRIu32, __FUNCTION__,
345                 graphicBuffer->getWidth());
346         return BAD_VALUE;
347     }
348     camera_jpeg_blob_t *hidlBlobHeader = reinterpret_cast<camera_jpeg_blob_t *>(hidlHeaderStart);
349 
350     // Check that the blob id is indeed the jpeg blob id.
351     if (hidlBlobHeader->jpeg_blob_id != CAMERA_JPEG_BLOB_ID) {
352         ALOGE("%s, jpeg blob id %d is not correct", __FUNCTION__, hidlBlobHeader->jpeg_blob_id);
353         return BAD_VALUE;
354     }
355 
356     // Retrieve id and blob size
357     CameraBlobId blobId = static_cast<CameraBlobId>(hidlBlobHeader->jpeg_blob_id);
358     uint32_t blobSizeBytes = hidlBlobHeader->jpeg_size;
359 
360     if (blobSizeBytes > (graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t))) {
361         ALOGE("%s, blobSize in HIDL jpeg blob : %d is corrupt, buffer size %" PRIu32, __FUNCTION__,
362                   blobSizeBytes, graphicBuffer->getWidth());
363     }
364 
365     uint8_t *aidlHeaderStart =
366             static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(CameraBlob);
367 
368     // Check that the jpeg buffer is big enough to contain AIDL camera blob
369     if (aidlHeaderStart < static_cast<uint8_t *>(mapped)) {
370         ALOGE("%s, jpeg buffer not large enough to fit AIDL camera blob %" PRIu32, __FUNCTION__,
371                 graphicBuffer->getWidth());
372         return BAD_VALUE;
373     }
374 
375     if (static_cast<uint8_t*>(mapped) + blobSizeBytes > aidlHeaderStart) {
376         ALOGE("%s, jpeg blob with size %d , buffer size %" PRIu32 " not large enough to fit"
377                 " AIDL camera blob without corrupting jpeg", __FUNCTION__, blobSizeBytes,
378                 graphicBuffer->getWidth());
379         return BAD_VALUE;
380     }
381 
382     // Fill in JPEG header
383     CameraBlob aidlHeader = {
384             .blobId = blobId,
385             .blobSizeBytes = static_cast<int32_t>(blobSizeBytes)
386     };
387     memcpy(aidlHeaderStart, &aidlHeader, sizeof(CameraBlob));
388     graphicBuffer->unlock();
389     return OK;
390 }
391 
392 status_t Camera3OutputStream::returnBufferCheckedLocked(
393             const camera_stream_buffer &buffer,
394             nsecs_t timestamp,
395             nsecs_t readoutTimestamp,
396             bool output,
397             int32_t transform,
398             const std::vector<size_t>& surface_ids,
399             /*out*/
400             sp<Fence> *releaseFenceOut) {
401 
402     (void)output;
403     ALOG_ASSERT(output, "Expected output to be true");
404 
405     status_t res;
406 
407     // Fence management - always honor release fence from HAL
408     sp<Fence> releaseFence = new Fence(buffer.release_fence);
409     int anwReleaseFence = releaseFence->dup();
410 
411     /**
412      * Release the lock briefly to avoid deadlock with
413      * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
414      * thread will go into StreamingProcessor::onFrameAvailable) during
415      * queueBuffer
416      */
417     sp<ANativeWindow> currentConsumer = mConsumer;
418     StreamState state = mState;
419     mLock.unlock();
420 
421     ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
422     bool bufferDeferred = false;
423     /**
424      * Return buffer back to ANativeWindow
425      */
426     if (buffer.status == CAMERA_BUFFER_STATUS_ERROR || mDropBuffers || timestamp == 0) {
427         // Cancel buffer
428         if (mDropBuffers) {
429             ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
430         } else if (buffer.status == CAMERA_BUFFER_STATUS_ERROR) {
431             ALOGV("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
432         } else {
433             ALOGE("%s: Stream %d: timestamp shouldn't be 0", __FUNCTION__, mId);
434         }
435 
436         res = currentConsumer->cancelBuffer(currentConsumer.get(),
437                 anwBuffer,
438                 anwReleaseFence);
439         if (shouldLogError(res, state)) {
440             ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
441                   " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
442         }
443 
444         notifyBufferReleased(anwBuffer);
445         if (mUseBufferManager) {
446             // Return this buffer back to buffer manager.
447             mBufferProducerListener->onBufferReleased();
448         }
449     } else {
450         if (mTraceFirstBuffer && (stream_type == CAMERA_STREAM_OUTPUT)) {
451             {
452                 char traceLog[48];
453                 snprintf(traceLog, sizeof(traceLog), "Stream %d: first full buffer\n", mId);
454                 ATRACE_NAME(traceLog);
455             }
456             mTraceFirstBuffer = false;
457         }
458         // Fix the CameraBlob id type discrepancy between HIDL and AIDL; details: http://b/229688810
459         if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF) {
460             if (mIPCTransport == IPCTransport::HIDL) {
461                 fixUpHidlJpegBlobHeader(anwBuffer, anwReleaseFence);
462             }
463             // If this is a JPEG output, and image dump mask is set, save image to
464             // disk.
465             if (mImageDumpMask) {
466                 dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
467             }
468         }
469 
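        // Use the readout timestamp when the stream's time base calls for it (readout-sensor base
        // or display-synced preview), otherwise the shutter timestamp, then shift it into the
        // consumer's time base by subtracting mTimestampOffset.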
470         nsecs_t captureTime = (mUseReadoutTime && readoutTimestamp != 0 ?
471                 readoutTimestamp : timestamp) - mTimestampOffset;
472         if (mPreviewFrameSpacer != nullptr) {
473             nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
474                     - mTimestampOffset;
475             res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
476                     transform, anwBuffer, anwReleaseFence);
477             if (res != OK) {
478                 ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
479                         __FUNCTION__, mId, strerror(-res), res);
480                 return res;
481             }
482             bufferDeferred = true;
483         } else {
484             nsecs_t presentTime = mSyncToDisplay ?
485                     syncTimestampToDisplayLocked(captureTime) : captureTime;
486 
487             setTransform(transform, true/*mayChangeMirror*/);
488             res = native_window_set_buffers_timestamp(mConsumer.get(), presentTime);
489             if (res != OK) {
490                 ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
491                       __FUNCTION__, mId, strerror(-res), res);
492                 return res;
493             }
494 
495             queueHDRMetadata(anwBuffer->handle, currentConsumer, dynamic_range_profile);
496 
497             res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
498             if (shouldLogError(res, state)) {
499                 ALOGE("%s: Stream %d: Error queueing buffer to native window:"
500                       " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
501             }
502         }
503     }
504     mLock.lock();
505 
506     if (bufferDeferred) {
507         mCachedOutputBufferCount++;
508     }
509 
510     // Once a valid buffer has been returned to the queue, we can no longer
511     // dequeue all buffers for preallocation.
512     if (buffer.status != CAMERA_BUFFER_STATUS_ERROR) {
513         mStreamUnpreparable = true;
514     }
515 
516     if (res != OK) {
517         close(anwReleaseFence);
518     }
519 
520     *releaseFenceOut = releaseFence;
521 
522     return res;
523 }
524 
525 void Camera3OutputStream::dump(int fd, const Vector<String16> &args) const {
526     (void) args;
527     String8 lines;
528     lines.appendFormat("    Stream[%d]: Output\n", mId);
529     lines.appendFormat("      Consumer name: %s\n", mConsumerName.string());
530     write(fd, lines.string(), lines.size());
531 
532     Camera3IOStreamBase::dump(fd, args);
533 
534     mDequeueBufferLatency.dump(fd,
535         "      DequeueBuffer latency histogram:");
536 }
537 
538 status_t Camera3OutputStream::setTransform(int transform, bool mayChangeMirror) {
539     ATRACE_CALL();
540     Mutex::Autolock l(mLock);
541     if (mMirrorMode != OutputConfiguration::MIRROR_MODE_AUTO && mayChangeMirror) {
542         // If the mirroring mode is not AUTO, do not allow transform update
543         // which may change mirror.
544         return OK;
545     }
546 
547     return setTransformLocked(transform);
548 }
549 
550 status_t Camera3OutputStream::setTransformLocked(int transform) {
551     status_t res = OK;
552 
553     if (transform == -1) return res;
554 
555     if (mState == STATE_ERROR) {
556         ALOGE("%s: Stream in error state", __FUNCTION__);
557         return INVALID_OPERATION;
558     }
559 
560     mTransform = transform;
561     if (mState == STATE_CONFIGURED) {
562         res = native_window_set_buffers_transform(mConsumer.get(),
563                 transform);
564         if (res != OK) {
565             ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
566                     __FUNCTION__, transform, strerror(-res), res);
567         }
568     }
569     return res;
570 }
571 
572 status_t Camera3OutputStream::configureQueueLocked() {
573     status_t res;
574 
575     mTraceFirstBuffer = true;
576     if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
577         return res;
578     }
579 
580     if ((res = configureConsumerQueueLocked(true /*allowPreviewRespace*/)) != OK) {
581         return res;
582     }
583 
584     // Set the dequeueBuffer/attachBuffer timeout if the consumer is not a hw composer or hw texture.
585     // We need to skip those cases because a timeout would disable the non-blocking (async) mode.
586     if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
587         if (mUseBufferManager) {
588             // When the buffer manager is handling the buffers, there should be buffers available
589             // in the buffer queue before we call into dequeueBuffer, because the buffer manager
590             // tracks free buffers.
591             // There are, however, consumer-side features (e.g. ImageReader::discardFreeBuffers)
592             // that can discard free buffers without notifying the buffer manager. We want the
593             // timeout to happen immediately here so the buffer manager can update its internal
594             // state and try to allocate a buffer instead of waiting.
595             mConsumer->setDequeueTimeout(0);
596         } else {
597             mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
598         }
599     }
600 
601     return OK;
602 }
603 
604 status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewRespace) {
605     status_t res;
606 
607     mTraceFirstBuffer = true;
608 
609     ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
610 
611     // Configure consumer-side ANativeWindow interface. The listener may be used
612     // to notify buffer manager (if it is used) of the returned buffers.
613     res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
614             /*reportBufferRemoval*/true,
615             /*listener*/mBufferProducerListener);
616     if (res != OK) {
617         ALOGE("%s: Unable to connect to native window for stream %d",
618                 __FUNCTION__, mId);
619         return res;
620     }
621 
622     mConsumerName = mConsumer->getConsumerName();
623 
624     res = native_window_set_usage(mConsumer.get(), mUsage);
625     if (res != OK) {
626         ALOGE("%s: Unable to configure usage %" PRIu64 " for stream %d",
627                 __FUNCTION__, mUsage, mId);
628         return res;
629     }
630 
631     res = native_window_set_scaling_mode(mConsumer.get(),
632             NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
633     if (res != OK) {
634         ALOGE("%s: Unable to configure stream scaling: %s (%d)",
635                 __FUNCTION__, strerror(-res), res);
636         return res;
637     }
638 
639     if (mMaxSize == 0) {
640         // For buffers of known size
641         res = native_window_set_buffers_dimensions(mConsumer.get(),
642                 camera_stream::width, camera_stream::height);
643     } else {
644         // For buffers with bounded size
645         res = native_window_set_buffers_dimensions(mConsumer.get(),
646                 mMaxSize, 1);
647     }
648     if (res != OK) {
649         ALOGE("%s: Unable to configure stream buffer dimensions"
650                 " %d x %d (maxSize %zu) for stream %d",
651                 __FUNCTION__, camera_stream::width, camera_stream::height,
652                 mMaxSize, mId);
653         return res;
654     }
655     res = native_window_set_buffers_format(mConsumer.get(),
656             camera_stream::format);
657     if (res != OK) {
658         ALOGE("%s: Unable to configure stream buffer format %#x for stream %d",
659                 __FUNCTION__, camera_stream::format, mId);
660         return res;
661     }
662 
663     res = native_window_set_buffers_data_space(mConsumer.get(),
664             camera_stream::data_space);
665     if (res != OK) {
666         ALOGE("%s: Unable to configure stream dataspace %#x for stream %d",
667                 __FUNCTION__, camera_stream::data_space, mId);
668         return res;
669     }
670 
671     int maxConsumerBuffers;
672     res = static_cast<ANativeWindow*>(mConsumer.get())->query(
673             mConsumer.get(),
674             NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
675     if (res != OK) {
676         ALOGE("%s: Unable to query consumer undequeued"
677                 " buffer count for stream %d", __FUNCTION__, mId);
678         return res;
679     }
680 
681     ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
682             maxConsumerBuffers, camera_stream::max_buffers);
683     if (camera_stream::max_buffers == 0) {
684         ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
685                 __FUNCTION__, camera_stream::max_buffers);
686         return INVALID_OPERATION;
687     }
688 
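    // Size the queue for the consumer's minimum undequeued buffers plus the HAL's maximum number
    // of in-flight buffers.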
689     mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;
690 
691     int timestampBase = getTimestampBase();
692     bool isDefaultTimeBase = (timestampBase ==
693             OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
694     if (allowPreviewRespace)  {
695         bool forceChoreographer = (timestampBase ==
696                 OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED);
697         bool defaultToChoreographer = (isDefaultTimeBase &&
698                 isConsumedByHWComposer());
699         bool defaultToSpacer = (isDefaultTimeBase &&
700                 isConsumedByHWTexture() &&
701                 !isConsumedByCPU() &&
702                 !isVideoStream());
703         if (forceChoreographer || defaultToChoreographer) {
704             mSyncToDisplay = true;
705             // For a choreographer-synced stream, extra buffers aren't kept by
706             // the camera service, so there is no need to update mMaxCachedBufferCount.
707             mTotalBufferCount += kDisplaySyncExtraBuffer;
708         } else if (defaultToSpacer) {
709             mPreviewFrameSpacer = new PreviewFrameSpacer(this, mConsumer);
710             // For the preview frame spacer, the extra buffer is kept by the camera
711             // service, so update mMaxCachedBufferCount.
712             mMaxCachedBufferCount = 1;
713             mTotalBufferCount += mMaxCachedBufferCount;
714             res = mPreviewFrameSpacer->run(String8::format("PreviewSpacer-%d", mId).string());
715             if (res != OK) {
716                 ALOGE("%s: Unable to start preview spacer", __FUNCTION__);
717                 return res;
718             }
719         }
720     }
721     mHandoutTotalBufferCount = 0;
722     mFrameCount = 0;
723     mLastTimestamp = 0;
724 
725     mUseReadoutTime =
726             (timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR || mSyncToDisplay);
727 
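    // mTimestampOffset converts buffer timestamps from the device's time base into the base the
    // consumer expects: it is cleared where no conversion is needed, negated to map a monotonic
    // device clock onto the boot-time (realtime) base, and otherwise left as the
    // bootTime - monotonicTime offset supplied at construction.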
728     if (isDeviceTimeBaseRealtime()) {
729         if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
730             // Default time base, but not hardware composer or video encoder
731             mTimestampOffset = 0;
732         } else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
733                 timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR ||
734                 timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR) {
735             mTimestampOffset = 0;
736         }
737         // If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
738         // timestamp offset as bootTime - monotonicTime.
739     } else {
740         if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME) {
741             // Reverse offset for monotonicTime -> bootTime
742             mTimestampOffset = -mTimestampOffset;
743         } else {
744             // If timestampBase is DEFAULT, MONOTONIC, SENSOR, READOUT_SENSOR or
745             // CHOREOGRAPHER_SYNCED, timestamp offset is 0.
746             mTimestampOffset = 0;
747         }
748     }
749 
750     res = native_window_set_buffer_count(mConsumer.get(),
751             mTotalBufferCount);
752     if (res != OK) {
753         ALOGE("%s: Unable to set buffer count for stream %d",
754                 __FUNCTION__, mId);
755         return res;
756     }
757 
758     res = native_window_set_buffers_transform(mConsumer.get(),
759             mTransform);
760     if (res != OK) {
761         ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
762                 __FUNCTION__, mTransform, strerror(-res), res);
763         return res;
764     }
765 
766     /**
767      * The Camera3 buffer manager is only supported from HAL3.3 onwards, as the older HALs require
768      * buffers to be statically allocated for internal static buffer registration, while the
769      * buffers provided by the buffer manager are dynamically allocated. Camera3Device only sets
770      * mBufferManager if the device version is > HAL3.2, which guarantees that the buffer manager
771      * setup below is otherwise skipped. Note that HAL3.2 is also excluded here, as some HAL3.2
772      * devices may not support dynamic buffer registration.
773      * Camera3BufferManager also does not support display/texture streams, as they have their own
774      * buffer management logic.
775      */
776     if (mBufferManager != 0 && mSetId > CAMERA3_STREAM_SET_ID_INVALID &&
777             !(isConsumedByHWComposer() || isConsumedByHWTexture())) {
778         uint64_t consumerUsage = 0;
779         getEndpointUsage(&consumerUsage);
780         uint32_t width = (mMaxSize == 0) ? getWidth() : mMaxSize;
781         uint32_t height = (mMaxSize == 0) ? getHeight() : 1;
782         StreamInfo streamInfo(
783                 getId(), getStreamSetId(), width, height, getFormat(), getDataSpace(),
784                 mUsage | consumerUsage, mTotalBufferCount,
785                 /*isConfigured*/true, isMultiResolution());
786         wp<Camera3OutputStream> weakThis(this);
787         res = mBufferManager->registerStream(weakThis,
788                 streamInfo);
789         if (res == OK) {
790             // Disable buffer allocation for this BufferQueue, buffer manager will take over
791             // the buffer allocation responsibility.
792             mConsumer->getIGraphicBufferProducer()->allowAllocation(false);
793             mUseBufferManager = true;
794         } else {
795             ALOGE("%s: Unable to register stream %d to camera3 buffer manager, "
796                   "(error %d %s), fall back to BufferQueue for buffer management!",
797                   __FUNCTION__, mId, res, strerror(-res));
798         }
799     }
800 
801     return OK;
802 }
803 
804 status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
805     ATRACE_HFR_CALL();
806     status_t res;
807 
808     if ((res = getBufferPreconditionCheckLocked()) != OK) {
809         return res;
810     }
811 
812     bool gotBufferFromManager = false;
813 
814     if (mUseBufferManager) {
815         sp<GraphicBuffer> gb;
816         res = mBufferManager->getBufferForStream(getId(), getStreamSetId(),
817                 isMultiResolution(), &gb, fenceFd);
818         if (res == OK) {
819             // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
820             // successful return.
821             *anb = gb.get();
822             res = mConsumer->attachBuffer(*anb);
823             if (shouldLogError(res, mState)) {
824                 ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
825                         __FUNCTION__, mId, strerror(-res), res);
826             }
827             if (res != OK) {
828                 checkRetAndSetAbandonedLocked(res);
829                 return res;
830             }
831             gotBufferFromManager = true;
832             ALOGV("Stream %d: Attached new buffer", getId());
833         } else if (res == ALREADY_EXISTS) {
834             // Have sufficient free buffers already attached, can just
835             // dequeue from buffer queue
836             ALOGV("Stream %d: Reusing attached buffer", getId());
837             gotBufferFromManager = false;
838         } else if (res != OK) {
839             ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
840                     __FUNCTION__, mId, strerror(-res), res);
841             return res;
842         }
843     }
844     if (!gotBufferFromManager) {
845         /**
846          * Release the lock briefly to avoid deadlock in the following scenario:
847          * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
848          * This thread holds the StreamingProcessor lock and tries to take the Camera3Stream lock.
849          * Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
850          * This thread holds the Camera3Stream lock and the bufferQueue lock, and tries to take
851          * the StreamingProcessor lock.
852          * Thread 3: Camera3Stream::getBuffer(). This thread holds the Camera3Stream lock and
853          * tries to take the bufferQueue lock.
854          * The result is a circular locking dependency.
855          */
856         sp<Surface> consumer = mConsumer;
857         size_t remainingBuffers = (mState == STATE_PREPARING ? mTotalBufferCount :
858                                    camera_stream::max_buffers) - mHandoutTotalBufferCount;
859         mLock.unlock();
860 
861         nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
862 
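        // When mBatchSize > 1, buffers are dequeued from the surface in batches and cached in
        // mBatchedBuffers; subsequent calls hand out buffers from that cache until it runs empty.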
863         size_t batchSize = mBatchSize.load();
864         if (batchSize == 1) {
865             sp<ANativeWindow> anw = consumer;
866             res = anw->dequeueBuffer(anw.get(), anb, fenceFd);
867         } else {
868             std::unique_lock<std::mutex> batchLock(mBatchLock);
869             res = OK;
870             if (mBatchedBuffers.size() == 0) {
871                 if (remainingBuffers == 0) {
872                     ALOGE("%s: cannot get buffer while all buffers are handed out", __FUNCTION__);
873                     return INVALID_OPERATION;
874                 }
875                 if (batchSize > remainingBuffers) {
876                     batchSize = remainingBuffers;
877                 }
878                 batchLock.unlock();
879                 // Refill batched buffers
880                 std::vector<Surface::BatchBuffer> batchedBuffers;
881                 batchedBuffers.resize(batchSize);
882                 res = consumer->dequeueBuffers(&batchedBuffers);
883                 batchLock.lock();
884                 if (res != OK) {
885                     ALOGE("%s: batch dequeueBuffers call failed! %s (%d)",
886                             __FUNCTION__, strerror(-res), res);
887                 } else {
888                     mBatchedBuffers = std::move(batchedBuffers);
889                 }
890             }
891 
892             if (res == OK) {
893                 // Dispatch batch buffers
894                 *anb = mBatchedBuffers.back().buffer;
895                 *fenceFd = mBatchedBuffers.back().fenceFd;
896                 mBatchedBuffers.pop_back();
897             }
898         }
899 
900         nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
901         mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
902 
903         mLock.lock();
904 
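        // With the buffer manager in use, the dequeue timeout is 0 (set in configureQueueLocked),
        // so TIMED_OUT here means the queue has no free buffer attached; fall back to requesting a
        // buffer from the buffer manager and attaching it to the surface.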
905         if (mUseBufferManager && res == TIMED_OUT) {
906             checkRemovedBuffersLocked();
907 
908             sp<GraphicBuffer> gb;
909             res = mBufferManager->getBufferForStream(
910                     getId(), getStreamSetId(), isMultiResolution(),
911                     &gb, fenceFd, /*noFreeBuffer*/true);
912 
913             if (res == OK) {
914                 // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after
915                 // a successful return.
916                 *anb = gb.get();
917                 res = mConsumer->attachBuffer(*anb);
918                 gotBufferFromManager = true;
919                 ALOGV("Stream %d: Attached new buffer", getId());
920 
921                 if (res != OK) {
922                     if (shouldLogError(res, mState)) {
923                         ALOGE("%s: Stream %d: Can't attach the output buffer to this surface:"
924                                 " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
925                     }
926                     checkRetAndSetAbandonedLocked(res);
927                     return res;
928                 }
929             } else {
930                 ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager:"
931                         " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
932                 return res;
933             }
934         } else if (res != OK) {
935             if (shouldLogError(res, mState)) {
936                 ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
937                         __FUNCTION__, mId, strerror(-res), res);
938             }
939             checkRetAndSetAbandonedLocked(res);
940             return res;
941         }
942     }
943 
944     if (res == OK) {
945         checkRemovedBuffersLocked();
946     }
947 
948     return res;
949 }
950 
951 void Camera3OutputStream::checkRemovedBuffersLocked(bool notifyBufferManager) {
952     std::vector<sp<GraphicBuffer>> removedBuffers;
953     status_t res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
954     if (res == OK) {
955         onBuffersRemovedLocked(removedBuffers);
956 
957         if (notifyBufferManager && mUseBufferManager && removedBuffers.size() > 0) {
958             mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), isMultiResolution(),
959                     removedBuffers.size());
960         }
961     }
962 }
963 
964 void Camera3OutputStream::checkRetAndSetAbandonedLocked(status_t res) {
965     // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is
966     // STATE_PREPARING, let prepareNextBuffer handle the error.)
967     if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
968         mState = STATE_ABANDONED;
969     }
970 }
971 
972 bool Camera3OutputStream::shouldLogError(status_t res, StreamState state) {
973     if (res == OK) {
974         return false;
975     }
976     if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) {
977         return false;
978     }
979     return true;
980 }
981 
982 void Camera3OutputStream::onCachedBufferQueued() {
983     Mutex::Autolock l(mLock);
984     mCachedOutputBufferCount--;
985     // Signal whoever is waiting for the buffer to be returned to the buffer
986     // queue.
987     mOutputBufferReturnedSignal.signal();
988 }
989 
990 status_t Camera3OutputStream::disconnectLocked() {
991     status_t res;
992 
993     if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) {
994         return res;
995     }
996 
997     // Stream configuration was not finished (the stream can only be in STATE_IN_CONFIG or
998     // STATE_CONSTRUCTED state), so there is no need to change the stream state; return OK.
999     if (mConsumer == nullptr) {
1000         return OK;
1001     }
1002 
1003     returnPrefetchedBuffersLocked();
1004 
1005     if (mPreviewFrameSpacer != nullptr) {
1006         mPreviewFrameSpacer->requestExit();
1007     }
1008 
1009     ALOGV("%s: disconnecting stream %d from native window", __FUNCTION__, getId());
1010 
1011     res = native_window_api_disconnect(mConsumer.get(),
1012                                        NATIVE_WINDOW_API_CAMERA);
1013     /**
1014      * This is not an error. If the client's calling process dies, the window will
1015      * also die and all calls to it will return DEAD_OBJECT, so it is effectively
1016      * already "disconnected".
1017      */
1018     if (res == DEAD_OBJECT) {
1019         ALOGW("%s: While disconnecting stream %d from native window, the"
1020                 " native window died from under us", __FUNCTION__, mId);
1021     }
1022     else if (res != OK) {
1023         ALOGE("%s: Unable to disconnect stream %d from native window "
1024               "(error %d %s)",
1025               __FUNCTION__, mId, res, strerror(-res));
1026         mState = STATE_ERROR;
1027         return res;
1028     }
1029 
1030     // Since the device is already idle, there are no getBuffer calls to the buffer manager, so
1031     // unregistering the stream at this point should be safe.
1032     if (mUseBufferManager) {
1033         res = mBufferManager->unregisterStream(getId(), getStreamSetId(), isMultiResolution());
1034         if (res != OK) {
1035             ALOGE("%s: Unable to unregister stream %d from buffer manager "
1036                     "(error %d %s)", __FUNCTION__, mId, res, strerror(-res));
1037             mState = STATE_ERROR;
1038             return res;
1039         }
1040         // Note that, to make the prepare/teardown case work, we must not call
1041         // mBufferManager.clear(), as the stream is still in a usable state after this call.
1042         mUseBufferManager = false;
1043     }
1044 
1045     mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
1046                                            : STATE_CONSTRUCTED;
1047 
1048     mDequeueBufferLatency.log("Stream %d dequeueBuffer latency histogram", mId);
1049     mDequeueBufferLatency.reset();
1050     return OK;
1051 }
1052 
1053 status_t Camera3OutputStream::getEndpointUsage(uint64_t *usage) const {
1054 
1055     status_t res;
1056 
1057     if (mConsumer == nullptr) {
1058         // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
1059         *usage = mConsumerUsage;
1060         return OK;
1061     }
1062 
1063     res = getEndpointUsageForSurface(usage, mConsumer);
1064 
1065     return res;
1066 }
1067 
1068 void Camera3OutputStream::applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/) {
1069     if (consumerUsage == nullptr) {
1070         return;
1071     }
1072 
1073     // If an opaque output stream's endpoint is ImageReader, add
1074     // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used
1075     // for the ZSL use case.
1076     // Assume it's for ImageReader if the consumer usage doesn't have any of these bits set:
1077     //     1. GRALLOC_USAGE_HW_TEXTURE
1078     //     2. GRALLOC_USAGE_HW_RENDER
1079     //     3. GRALLOC_USAGE_HW_COMPOSER
1080     //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
1081     if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
1082             (*consumerUsage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
1083             GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
1084         *consumerUsage |= GRALLOC_USAGE_HW_CAMERA_ZSL;
1085     }
1086 }
1087 
1088 status_t Camera3OutputStream::getEndpointUsageForSurface(uint64_t *usage,
1089         const sp<Surface>& surface) const {
1090     status_t res;
1091     uint64_t u = 0;
1092 
1093     res = native_window_get_consumer_usage(static_cast<ANativeWindow*>(surface.get()), &u);
1094     applyZSLUsageQuirk(camera_stream::format, &u);
1095     *usage = u;
1096     return res;
1097 }
1098 
1099 bool Camera3OutputStream::isVideoStream() const {
1100     uint64_t usage = 0;
1101     status_t res = getEndpointUsage(&usage);
1102     if (res != OK) {
1103         ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
1104         return false;
1105     }
1106 
1107     return (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0;
1108 }
1109 
1110 status_t Camera3OutputStream::setBufferManager(sp<Camera3BufferManager> bufferManager) {
1111     Mutex::Autolock l(mLock);
1112     if (mState != STATE_CONSTRUCTED) {
1113         ALOGE("%s: this method can only be called when stream in CONSTRUCTED state.",
1114                 __FUNCTION__);
1115         return INVALID_OPERATION;
1116     }
1117     mBufferManager = bufferManager;
1118 
1119     return OK;
1120 }
1121 
1122 status_t Camera3OutputStream::updateStream(const std::vector<sp<Surface>> &/*outputSurfaces*/,
1123             const std::vector<OutputStreamInfo> &/*outputInfo*/,
1124             const std::vector<size_t> &/*removedSurfaceIds*/,
1125             KeyedVector<sp<Surface>, size_t> * /*outputMapo*/) {
1126     ALOGE("%s: this method is not supported!", __FUNCTION__);
1127     return INVALID_OPERATION;
1128 }
1129 
1130 void Camera3OutputStream::BufferProducerListener::onBufferReleased() {
1131     sp<Camera3OutputStream> stream = mParent.promote();
1132     if (stream == nullptr) {
1133         ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
1134         return;
1135     }
1136 
1137     Mutex::Autolock l(stream->mLock);
1138     if (!(stream->mUseBufferManager)) {
1139         return;
1140     }
1141 
1142     ALOGV("Stream %d: Buffer released", stream->getId());
1143     bool shouldFreeBuffer = false;
1144     status_t res = stream->mBufferManager->onBufferReleased(
1145         stream->getId(), stream->getStreamSetId(), stream->isMultiResolution(),
1146         &shouldFreeBuffer);
1147     if (res != OK) {
1148         ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
1149                 strerror(-res), res);
1150         stream->mState = STATE_ERROR;
1151     }
1152 
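    // If the buffer manager reports that this stream holds more buffers than it needs, detach one
    // from the queue so it can be freed, and then notify the manager that the buffer is gone.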
1153     if (shouldFreeBuffer) {
1154         sp<GraphicBuffer> buffer;
1155         // Detach and free a buffer (when buffer goes out of scope)
1156         stream->detachBufferLocked(&buffer, /*fenceFd*/ nullptr);
1157         if (buffer.get() != nullptr) {
1158             stream->mBufferManager->notifyBufferRemoved(
1159                     stream->getId(), stream->getStreamSetId(), stream->isMultiResolution());
1160         }
1161     }
1162 }
1163 
1164 void Camera3OutputStream::BufferProducerListener::onBuffersDiscarded(
1165         const std::vector<sp<GraphicBuffer>>& buffers) {
1166     sp<Camera3OutputStream> stream = mParent.promote();
1167     if (stream == nullptr) {
1168         ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
1169         return;
1170     }
1171 
1172     if (buffers.size() > 0) {
1173         Mutex::Autolock l(stream->mLock);
1174         stream->onBuffersRemovedLocked(buffers);
1175         if (stream->mUseBufferManager) {
1176             stream->mBufferManager->onBuffersRemoved(stream->getId(),
1177                     stream->getStreamSetId(), stream->isMultiResolution(), buffers.size());
1178         }
1179         ALOGV("Stream %d: %zu Buffers discarded.", stream->getId(), buffers.size());
1180     }
1181 }
1182 
1183 void Camera3OutputStream::onBuffersRemovedLocked(
1184         const std::vector<sp<GraphicBuffer>>& removedBuffers) {
1185     sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
1186     if (callback != nullptr) {
1187         for (const auto& gb : removedBuffers) {
1188             callback->onBufferFreed(mId, gb->handle);
1189         }
1190     }
1191 }
1192 
1193 status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
1194     Mutex::Autolock l(mLock);
1195     return detachBufferLocked(buffer, fenceFd);
1196 }
1197 
1198 status_t Camera3OutputStream::detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd) {
1199     ALOGV("Stream %d: detachBuffer", getId());
1200     if (buffer == nullptr) {
1201         return BAD_VALUE;
1202     }
1203 
1204     sp<Fence> fence;
1205     status_t res = mConsumer->detachNextBuffer(buffer, &fence);
1206     if (res == NO_MEMORY) {
1207     // This may rarely happen, and indicates that the released buffer was already freed by
1208     // another call (e.g., attachBuffer, dequeueBuffer, etc.) before reaching here. We should
1209     // notify the buffer manager that this buffer has been freed. It's not fatal, but should be
1210     // avoided, therefore log a warning.
1211         *buffer = 0;
1212         ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
1213     } else if (res != OK) {
1214         // Treat other errors as abandonment
1215         if (shouldLogError(res, mState)) {
1216             ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
1217         }
1218         mState = STATE_ABANDONED;
1219         return res;
1220     }
1221 
1222     if (fenceFd != nullptr) {
1223         if (fence != 0 && fence->isValid()) {
1224             *fenceFd = fence->dup();
1225         } else {
1226             *fenceFd = -1;
1227         }
1228     }
1229 
1230     // Here we assume detachBuffer is called by buffer manager so it doesn't need to be notified
1231     checkRemovedBuffersLocked(/*notifyBufferManager*/false);
1232     return res;
1233 }
1234 
status_t Camera3OutputStream::dropBuffers(bool dropping) {
    Mutex::Autolock l(mLock);
    mDropBuffers = dropping;
    return OK;
}

const String8& Camera3OutputStream::getPhysicalCameraId() const {
    Mutex::Autolock l(mLock);
    return physicalCameraId();
}

status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
    return OK;
}

bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
    Mutex::Autolock l(mLock);

    if (surface_id != 0) {
        ALOGE("%s: surface_id %zu for Camera3OutputStream should be 0!", __FUNCTION__, surface_id);
    }
    return mConsumer == nullptr;
}

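// Attach the consumer surface for a stream that was created with a deferred consumer (i.e.
// isConsumerConfigurationDeferred() still returns true). A plain output stream accepts exactly
// one surface, and the consumer can only be set once.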
status_t Camera3OutputStream::setConsumers(const std::vector<sp<Surface>>& consumers) {
    Mutex::Autolock l(mLock);
    if (consumers.size() != 1) {
        ALOGE("%s: it's illegal to set %zu consumer surfaces!",
                  __FUNCTION__, consumers.size());
        return INVALID_OPERATION;
    }
    if (consumers[0] == nullptr) {
        ALOGE("%s: it's illegal to set null consumer surface!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (mConsumer != nullptr) {
        ALOGE("%s: consumer surface was already set!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    mConsumer = consumers[0];
    return OK;
}

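// The isConsumedBy*() helpers below query the consumer endpoint's gralloc usage flags to infer
// what kind of consumer is attached (hardware composer, GPU texture, or CPU reader). They are
// conservative: if the usage flags can't be queried, they report "not consumed by X".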
bool Camera3OutputStream::isConsumedByHWComposer() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_COMPOSER) != 0;
}

bool Camera3OutputStream::isConsumedByHWTexture() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_TEXTURE) != 0;
}

bool Camera3OutputStream::isConsumedByCPU() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_SW_READ_MASK) != 0;
}

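// Debug helper: write the contents of a JPEG (BLOB) output buffer to disk under
// /data/misc/cameraserver/ using a timestamped file name. The buffer is locked for CPU read,
// trimmed to the actual JPEG payload size found inside the blob, then written out and unlocked.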
void Camera3OutputStream::dumpImageToDisk(nsecs_t timestamp,
        ANativeWindowBuffer* anwBuffer, int fence) {
    // Derive the output file name
    std::string fileExtension = "jpg";
    char imageFileName[64];
    time_t now = time(0);
    tm *localTime = localtime(&now);
    snprintf(imageFileName, sizeof(imageFileName), "IMG_%4d%02d%02d_%02d%02d%02d_%" PRId64 ".%s",
            1900 + localTime->tm_year, localTime->tm_mon + 1, localTime->tm_mday,
            localTime->tm_hour, localTime->tm_min, localTime->tm_sec,
            timestamp, fileExtension.c_str());

    // Lock the image for CPU read
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
    void* mapped = nullptr;
    base::unique_fd fenceFd(dup(fence));
    status_t res = graphicBuffer->lockAsync(GraphicBuffer::USAGE_SW_READ_OFTEN, &mapped,
            fenceFd.get());
    if (res != OK) {
        ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
        return;
    }

    // Figure out the actual file size
    auto actualJpegSize = android::camera2::JpegProcessor::findJpegSize((uint8_t*)mapped, mMaxSize);
    if (actualJpegSize == 0) {
        actualJpegSize = mMaxSize;
    }

    // Output image data to file
    std::string filePath = "/data/misc/cameraserver/";
    filePath += imageFileName;
    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
    if (!imageFile.is_open()) {
        ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
        graphicBuffer->unlock();
        return;
    }
    imageFile.write((const char*)mapped, actualJpegSize);

    graphicBuffer->unlock();
}

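// Request that buffers be dequeued from the stream's Surface in batches of 'batchSize'.
// Batching is only allowed for video streams that don't use the buffer manager, the batch size
// is capped at max_buffers, and it can only be changed while still at the default value of 1.
//
// Illustrative usage (sketch only; assumes a configured high-frame-rate video stream):
//     sp<Camera3OutputStream> stream = ...;
//     if (stream->setBatchSize(4) != OK) {
//         // Batching is unsupported for this stream; buffers are dequeued one at a time.
//     }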
status_t Camera3OutputStream::setBatchSize(size_t batchSize) {
    Mutex::Autolock l(mLock);
    if (batchSize == 0) {
        ALOGE("%s: invalid batch size 0", __FUNCTION__);
        return BAD_VALUE;
    }

    if (mUseBufferManager) {
        ALOGE("%s: batch operation is not supported with buffer manager", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (!isVideoStream()) {
        ALOGE("%s: batch operation is not supported with non-video stream", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (camera_stream::max_buffers < batchSize) {
        ALOGW("%s: batch size is capped by max_buffers %d", __FUNCTION__,
                camera_stream::max_buffers);
        batchSize = camera_stream::max_buffers;
    }

    size_t defaultBatchSize = 1;
    if (!mBatchSize.compare_exchange_strong(defaultBatchSize, batchSize)) {
        ALOGE("%s: change batch size from %zu to %zu dynamically is not supported",
                __FUNCTION__, defaultBatchSize, batchSize);
        return INVALID_OPERATION;
    }

    return OK;
}

void Camera3OutputStream::onMinDurationChanged(nsecs_t duration, bool fixedFps) {
    Mutex::Autolock l(mLock);
    mMinExpectedDuration = duration;
    mFixedFps = fixedFps;
}

void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) {
    Mutex::Autolock l(mLock);
    camera_stream::use_case = streamUseCase;
}

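// Cancel back to the Surface any buffers that were dequeued ahead of time as part of a batch but
// never handed out, so they don't remain stranded on the producer side.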
void Camera3OutputStream::returnPrefetchedBuffersLocked() {
    std::vector<Surface::BatchBuffer> batchedBuffers;

    {
        std::lock_guard<std::mutex> batchLock(mBatchLock);
        if (mBatchedBuffers.size() != 0) {
            ALOGW("%s: %zu extra prefetched buffers detected. Returning",
                   __FUNCTION__, mBatchedBuffers.size());
            batchedBuffers = std::move(mBatchedBuffers);
        }
    }

    if (batchedBuffers.size() > 0) {
        mConsumer->cancelBuffers(batchedBuffers);
    }
}

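// Map a capture timestamp 't' to a presentation timestamp that lines up with the display's
// upcoming VSYNC timelines, so that frames of a fixed-FPS stream are presented with even spacing.
// For variable FPS (mFixedFps == false), or when VSYNC event data can't be fetched, the capture
// timestamp is returned unmodified.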
nsecs_t Camera3OutputStream::syncTimestampToDisplayLocked(nsecs_t t) {
    nsecs_t currentTime = systemTime();
    if (!mFixedFps) {
        mLastCaptureTime = t;
        mLastPresentTime = currentTime;
        return t;
    }

    ParcelableVsyncEventData parcelableVsyncEventData;
    auto res = mDisplayEventReceiver.getLatestVsyncEventData(&parcelableVsyncEventData);
    if (res != OK) {
        ALOGE("%s: Stream %d: Error getting latest vsync event data: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        mLastCaptureTime = t;
        mLastPresentTime = currentTime;
        return t;
    }

    const VsyncEventData& vsyncEventData = parcelableVsyncEventData.vsync;
    nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;

    // Find the best presentation time without worrying about the previous frame's
    // presentation time if the capture interval is more than kSpacingResetIntervalNs.
    //
    // When frames are more than 50 ms apart (3 vsyncs at a 60 Hz refresh rate),
    // there is little risk in starting over and finding the earliest vsync to latch onto.
    // - Update the captureToPresentTime offset to be used for later frames.
    // - Example use cases:
    //   - when the frame rate drops below 20 fps, or
    //   - a new streaming session starts (stopPreview followed by startPreview)
    //
    nsecs_t captureInterval = t - mLastCaptureTime;
    if (captureInterval > kSpacingResetIntervalNs) {
        for (size_t i = 0; i < VsyncEventData::kFrameTimelinesLength; i++) {
            const auto& timeline = vsyncEventData.frameTimelines[i];
            if (timeline.deadlineTimestamp >= currentTime &&
                    timeline.expectedPresentationTime > minPresentT) {
                nsecs_t presentT = vsyncEventData.frameTimelines[i].expectedPresentationTime;
                mCaptureToPresentOffset = presentT - t;
                mLastCaptureTime = t;
                mLastPresentTime = presentT;

                // Move the expected presentation time back by 1/3 of the frame interval to
                // mitigate time drift. Due to drift, if we directly used the expected
                // presentation time, two expected presentation times would often fall into
                // the same VSYNC interval.
                return presentT - vsyncEventData.frameInterval/3;
            }
        }
    }

    nsecs_t idealPresentT = t + mCaptureToPresentOffset;
    nsecs_t expectedPresentT = mLastPresentTime;
    nsecs_t minDiff = INT64_MAX;

    // In the fixed FPS case, when frame durations are close to multiples of the display refresh
    // rate, derive the minimum interval between presentation times from the minimal expected
    // duration. The minimum number of Vsyncs is:
    // - 0 if minFrameDuration in (0, 1.5] * vSyncInterval,
    // - 1 if minFrameDuration in (1.5, 2.5] * vSyncInterval,
    // - and so on.
    //
    // This spaces out the presentation of frames so that they are roughly in sync with frame
    // captures.
    int minVsyncs = (mMinExpectedDuration - vsyncEventData.frameInterval / 2) /
            vsyncEventData.frameInterval;
    if (minVsyncs < 0) minVsyncs = 0;
    nsecs_t minInterval = minVsyncs * vsyncEventData.frameInterval;
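    // Worked example (illustrative values): for a 30 fps capture (mMinExpectedDuration ~= 33.3 ms)
    // on a 60 Hz display (frameInterval ~= 16.7 ms):
    //   minVsyncs   = (33.3 ms - 16.7 ms / 2) / 16.7 ms = 1   (integer division)
    //   minInterval = 1 * 16.7 ms = 16.7 ms
    // so consecutive presentation times are kept at least one VSYNC apart.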

    // In the fixed FPS case, if the frame duration deviates from multiples of the
    // display refresh rate, find the closest Vsync without requiring a minimum
    // number of Vsyncs.
    //
    // Example: (24fps camera, 60hz refresh):
    //   capture readout:  |  t1  |  t1  | .. |  t1  | .. |  t1  | .. |  t1  |
    //   display VSYNC:      | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
    //   |  : 1 frame
    //   t1 : 41.67ms
    //   t2 : 16.67ms
    //   t1/t2 = 2.5
    //
    //   24fps is a commonly used video frame rate. Because the capture
    //   interval is 2.5 times the display refresh interval, the minVsyncs
    //   calculation falls directly on the boundary condition. In this case,
    //   we should fall back to the basic logic of finding the closest vsync
    //   timestamp without worrying about minVsyncs.
    float captureToVsyncIntervalRatio = 1.0f * mMinExpectedDuration / vsyncEventData.frameInterval;
    float ratioDeviation = std::fabs(
            captureToVsyncIntervalRatio - std::roundf(captureToVsyncIntervalRatio));
    bool captureDeviateFromVsync = ratioDeviation >= kMaxIntervalRatioDeviation;
    bool cameraDisplayInSync = (mFixedFps && !captureDeviateFromVsync);
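    // For example (illustrative values): a 30 fps capture on a 60 Hz display gives a ratio of 2.0
    // and a deviation of 0.0, so capture and display are considered in sync; the 24 fps case above
    // gives a ratio of 2.5 and a deviation of 0.5, which exceeds kMaxIntervalRatioDeviation, so
    // the minimum-interval requirement is dropped.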

    // Find the best timestamp in the vsync timelines:
    // - Only use at most kMaxTimelines timelines to avoid long latency
    // - closest to the ideal presentation time,
    // - deadline timestamp is greater than the current time, and
    // - For fixed FPS, if the capture interval doesn't deviate too much from the refresh
    //   interval, the candidate presentation time is at least minInterval in the future
    //   compared to the last presentation time.
    // - For variable FPS, or if the capture interval deviates from the refresh
    //   interval by more than 5%, find a presentation time closest to
    //   (lastPresentationTime + captureToPresentOffset) instead.
    int maxTimelines = std::min(kMaxTimelines, (int)VsyncEventData::kFrameTimelinesLength);
    float biasForShortDelay = 1.0f;
    for (int i = 0; i < maxTimelines; i++) {
        const auto& vsyncTime = vsyncEventData.frameTimelines[i];
        if (minVsyncs > 0) {
            // Bias towards using a smaller timeline index:
            //   i = 0:                bias = 1
            //   i = maxTimelines-1:   bias = -1
            biasForShortDelay = 1.0 - 2.0 * i / (maxTimelines - 1);
        }
        if (std::abs(vsyncTime.expectedPresentationTime - idealPresentT) < minDiff &&
                vsyncTime.deadlineTimestamp >= currentTime &&
                ((!cameraDisplayInSync && vsyncTime.expectedPresentationTime > minPresentT) ||
                 (cameraDisplayInSync && vsyncTime.expectedPresentationTime >
                mLastPresentTime + minInterval +
                    static_cast<nsecs_t>(biasForShortDelay * kTimelineThresholdNs)))) {
            expectedPresentT = vsyncTime.expectedPresentationTime;
            minDiff = std::abs(vsyncTime.expectedPresentationTime - idealPresentT);
        }
    }

    if (expectedPresentT == mLastPresentTime && expectedPresentT <
            vsyncEventData.frameTimelines[maxTimelines-1].expectedPresentationTime) {
        // Couldn't find a reasonable presentation time. Using the last frame's
        // presentation time would cause a frame drop. The best option now
        // is to use the next VSync as long as the last presentation time
        // doesn't already have the maximum latency, in which case dropping the
        // buffer is preferable to increasing latency further.
        //
        // Example: (60fps camera, 59.9hz refresh):
        //   capture readout:  | t1 | t1 | .. | t1 | .. | t1 | .. | t1 |
        //                      \    \    \     \    \    \    \     \   \
        //   queue to BQ:       |    |    |     |    |    |    |      |    |
        //                      \    \    \     \    \     \    \      \    \
        //   display VSYNC:      | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
        //
        //   |: 1 frame
        //   t1 : 16.67ms
        //   t2 : 16.69ms
        //
        // It takes 833 frames for the capture readout count and the display VSYNC count to be
        // off by 1.
        //  - At frames [0, 832], presentationTime is set to timeline[0]
        //  - At frames [833, 833*2-1], presentationTime is set to timeline[1]
        //  - At frames [833*2, 833*3-1], presentationTime is set to timeline[2]
        //  - At frame 833*3, no presentation time is found because we only
        //    search timeline[0..2].
        //  - Dropping one buffer is better than extending the presentation
        //    time further.
        //
        // However, if frame 833*2 arrives 16.67ms early (right after frame
        // 833*2-1), no presentation time can be found because
        // getLatestVsyncEventData is called early. In that case, it's better to
        // set the presentation time by offsetting the last presentation time.
        expectedPresentT += vsyncEventData.frameInterval;
    }

    mLastCaptureTime = t;
    mLastPresentTime = expectedPresentT;

    // Move the expected presentation time back by 1/3 of the frame interval to
    // mitigate time drift. Due to drift, if we directly used the expected
    // presentation time, two expected presentation times would often fall into
    // the same VSYNC interval.
    return expectedPresentT - vsyncEventData.frameInterval/3;
}

bool Camera3OutputStream::shouldLogError(status_t res) {
    Mutex::Autolock l(mLock);
    return shouldLogError(res, mState);
}

}; // namespace camera3

}; // namespace android