/*
 * Copyright (C) 2013-2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Camera3-OutputStream"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0

#include <algorithm>
#include <ctime>
#include <fstream>

#include <aidl/android/hardware/camera/device/CameraBlob.h>
#include <aidl/android/hardware/camera/device/CameraBlobId.h>
#include "aidl/android/hardware/graphics/common/Dataspace.h"

#include <android-base/unique_fd.h>
#include <cutils/properties.h>
#include <ui/GraphicBuffer.h>
#include <utils/Log.h>
#include <utils/Trace.h>

#include <common/CameraDeviceBase.h>
#include "api1/client2/JpegProcessor.h"
#include "Camera3OutputStream.h"
#include "utils/TraceHFR.h"

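// Recover a pointer to the enclosing struct from a pointer to one of its
// members (used below to map a buffer_handle_t* back to its owning
// ANativeWindowBuffer).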
#ifndef container_of
#define container_of(ptr, type, member) \
    (type *)((char*)(ptr) - offsetof(type, member))
#endif

namespace android {

namespace camera3 {

using aidl::android::hardware::camera::device::CameraBlob;
using aidl::android::hardware::camera::device::CameraBlobId;

Camera3OutputStream::Camera3OutputStream(int id,
        sp<Surface> consumer,
        uint32_t width, uint32_t height, int format,
        android_dataspace dataSpace, camera_stream_rotation_t rotation,
        nsecs_t timestampOffset, const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
        int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
        int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                            /*maxSize*/0, format, dataSpace, rotation,
                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                            dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
                            timestampBase, colorSpace),
        mConsumer(consumer),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(0),
        mDropBuffers(false),
        mMirrorMode(mirrorMode),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {

    if (mConsumer == NULL) {
        ALOGE("%s: Consumer is NULL!", __FUNCTION__);
        mState = STATE_ERROR;
    }

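    // Release notifications are only needed when the stream belongs to a
    // valid stream set, i.e. when its buffers may be managed by the
    // Camera3BufferManager.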
    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}

Camera3OutputStream::Camera3OutputStream(int id,
        sp<Surface> consumer,
        uint32_t width, uint32_t height, size_t maxSize, int format,
        android_dataspace dataSpace, camera_stream_rotation_t rotation,
        nsecs_t timestampOffset, const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
        int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
        int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
                            format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
                            setId, isMultiResolution, dynamicRangeProfile, streamUseCase,
                            deviceTimeBaseIsRealtime, timestampBase, colorSpace),
        mConsumer(consumer),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(0),
        mDropBuffers(false),
        mMirrorMode(mirrorMode),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {

    if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
        ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
                format);
        mState = STATE_ERROR;
    }

    if (mConsumer == NULL) {
        ALOGE("%s: Consumer is NULL!", __FUNCTION__);
        mState = STATE_ERROR;
    }

    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}

Camera3OutputStream::Camera3OutputStream(int id,
        uint32_t width, uint32_t height, int format,
        uint64_t consumerUsage, android_dataspace dataSpace,
        camera_stream_rotation_t rotation, nsecs_t timestampOffset,
        const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
        int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
        int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                            /*maxSize*/0, format, dataSpace, rotation,
                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                            dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
                            timestampBase, colorSpace),
        mConsumer(nullptr),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(consumerUsage),
        mDropBuffers(false),
        mMirrorMode(mirrorMode),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {
    // Deferred consumers only support the preview surface format for now.
    if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
        ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
                __FUNCTION__);
        mState = STATE_ERROR;
    }

    // Validation check for the consumer usage flag.
    if ((consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) == 0 &&
            (consumerUsage & GraphicBuffer::USAGE_HW_COMPOSER) == 0) {
        ALOGE("%s: Deferred consumer usage flag is illegal %" PRIu64 "!",
                __FUNCTION__, consumerUsage);
        mState = STATE_ERROR;
    }

    mConsumerName = String8("Deferred");
    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}

Camera3OutputStream::Camera3OutputStream(int id, camera_stream_type_t type,
        uint32_t width, uint32_t height,
        int format,
        android_dataspace dataSpace,
        camera_stream_rotation_t rotation,
        const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        IPCTransport transport,
        uint64_t consumerUsage, nsecs_t timestampOffset,
        int setId, bool isMultiResolution,
        int64_t dynamicRangeProfile, int64_t streamUseCase,
        bool deviceTimeBaseIsRealtime, int timestampBase,
        int mirrorMode, int32_t colorSpace,
        bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, type, width, height,
                            /*maxSize*/0,
                            format, dataSpace, rotation,
                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                            dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
                            timestampBase, colorSpace),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(consumerUsage),
        mDropBuffers(false),
        mMirrorMode(mirrorMode),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {

    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);

    // Subclasses are expected to initialize mConsumer themselves.
}


Camera3OutputStream::~Camera3OutputStream() {
    disconnectLocked();
}

status_t Camera3OutputStream::getBufferLocked(camera_stream_buffer *buffer,
        const std::vector<size_t>&) {
    ATRACE_HFR_CALL();

    ANativeWindowBuffer* anb;
    int fenceFd = -1;

    status_t res;
    res = getBufferLockedCommon(&anb, &fenceFd);
    if (res != OK) {
        return res;
    }

    /**
     * FenceFD now owned by HAL except in case of error,
     * in which case we reassign it to acquire_fence
     */
    handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
                        /*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);

    return OK;
}

status_t Camera3OutputStream::getBuffersLocked(std::vector<OutstandingBuffer>* outBuffers) {
    status_t res;

    if ((res = getBufferPreconditionCheckLocked()) != OK) {
        return res;
    }

    if (mUseBufferManager) {
        ALOGE("%s: stream %d is managed by buffer manager and does not support batch operation",
                __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    sp<Surface> consumer = mConsumer;
    /**
     * Release the lock briefly to avoid deadlock in the scenario below:
     * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
     *           This thread acquires the StreamingProcessor lock and tries to lock the
     *           Camera3Stream lock.
     * Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
     *           This thread acquires the Camera3Stream lock and the bufferQueue lock, and
     *           tries to lock the StreamingProcessor lock.
     * Thread 3: Camera3Stream::getBuffer(). This thread acquires the Camera3Stream lock
     *           and tries to lock the bufferQueue lock.
     * This creates a circular locking dependency.
     */
    mLock.unlock();

    size_t numBuffersRequested = outBuffers->size();
    std::vector<Surface::BatchBuffer> buffers(numBuffersRequested);

    nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
    res = consumer->dequeueBuffers(&buffers);
    nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
    mDequeueBufferLatency.add(dequeueStart, dequeueEnd);

    mLock.lock();

    if (res != OK) {
        if (shouldLogError(res, mState)) {
            ALOGE("%s: Stream %d: Can't dequeue %zu output buffers: %s (%d)",
                    __FUNCTION__, mId, numBuffersRequested, strerror(-res), res);
        }
        checkRetAndSetAbandonedLocked(res);
        return res;
    }
    checkRemovedBuffersLocked();

    /**
     * FenceFD now owned by HAL except in case of error,
     * in which case we reassign it to acquire_fence
     */
    for (size_t i = 0; i < numBuffersRequested; i++) {
        handoutBufferLocked(*(outBuffers->at(i).outBuffer),
                &(buffers[i].buffer->handle), /*acquireFence*/buffers[i].fenceFd,
                /*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);
    }
    return OK;
}

status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
        ANativeWindowBuffer* buffer, int anwReleaseFence,
        const std::vector<size_t>&) {
    return consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
}

status_t Camera3OutputStream::returnBufferLocked(
        const camera_stream_buffer &buffer,
        nsecs_t timestamp, nsecs_t readoutTimestamp,
        int32_t transform, const std::vector<size_t>& surface_ids) {
    ATRACE_HFR_CALL();

    if (mHandoutTotalBufferCount == 1) {
        returnPrefetchedBuffersLocked();
    }

    status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
                                         /*output*/true, transform, surface_ids);

    if (res != OK) {
        return res;
    }

    mLastTimestamp = timestamp;
    mFrameCount++;

    return OK;
}

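// For BLOB streams the buffer "width" is its total size in bytes, and the
// transport-specific blob header sits at the very end of the buffer. HIDL
// HALs write a camera_jpeg_blob_t there; rewrite it as an AIDL CameraBlob so
// consumers parsing the AIDL layout find a valid header.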
status_t Camera3OutputStream::fixUpHidlJpegBlobHeader(ANativeWindowBuffer* anwBuffer, int fence) {
    // Lock the JPEG buffer for CPU read
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
    void* mapped = nullptr;
    base::unique_fd fenceFd(dup(fence));
    // Use USAGE_SW_WRITE_RARELY since we're going to re-write the CameraBlob
    // header.
    GraphicBufferLocker gbLocker(graphicBuffer);
    status_t res =
            gbLocker.lockAsync(
                    GraphicBuffer::USAGE_SW_READ_OFTEN | GraphicBuffer::USAGE_SW_WRITE_RARELY,
                    &mapped, fenceFd.release());
    if (res != OK) {
        ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
        return res;
    }

    uint8_t *hidlHeaderStart =
            static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t);
    // Check that the jpeg buffer is big enough to contain the HIDL camera blob
    if (hidlHeaderStart < static_cast<uint8_t *>(mapped)) {
        ALOGE("%s, jpeg buffer not large enough to fit HIDL camera blob %" PRIu32, __FUNCTION__,
                graphicBuffer->getWidth());
        return BAD_VALUE;
    }
    camera_jpeg_blob_t *hidlBlobHeader = reinterpret_cast<camera_jpeg_blob_t *>(hidlHeaderStart);

    // Check that the header indeed carries the jpeg blob id.
    if (hidlBlobHeader->jpeg_blob_id != CAMERA_JPEG_BLOB_ID) {
        ALOGE("%s, jpeg blob id %d is not correct", __FUNCTION__, hidlBlobHeader->jpeg_blob_id);
        return BAD_VALUE;
    }

    // Retrieve id and blob size
    CameraBlobId blobId = static_cast<CameraBlobId>(hidlBlobHeader->jpeg_blob_id);
    uint32_t blobSizeBytes = hidlBlobHeader->jpeg_size;

    if (blobSizeBytes > (graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t))) {
        ALOGE("%s, blobSize in HIDL jpeg blob : %d is corrupt, buffer size %" PRIu32, __FUNCTION__,
                blobSizeBytes, graphicBuffer->getWidth());
    }

    uint8_t *aidlHeaderStart =
            static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(CameraBlob);

    // Check that the jpeg buffer is big enough to contain the AIDL camera blob
    if (aidlHeaderStart < static_cast<uint8_t *>(mapped)) {
        ALOGE("%s, jpeg buffer not large enough to fit AIDL camera blob %" PRIu32, __FUNCTION__,
                graphicBuffer->getWidth());
        return BAD_VALUE;
    }

    if (static_cast<uint8_t*>(mapped) + blobSizeBytes > aidlHeaderStart) {
        ALOGE("%s, jpeg blob with size %d , buffer size %" PRIu32 " not large enough to fit"
                " AIDL camera blob without corrupting jpeg", __FUNCTION__, blobSizeBytes,
                graphicBuffer->getWidth());
        return BAD_VALUE;
    }

    // Fill in JPEG header
    CameraBlob aidlHeader = {
            .blobId = blobId,
            .blobSizeBytes = static_cast<int32_t>(blobSizeBytes)
    };
    memcpy(aidlHeaderStart, &aidlHeader, sizeof(CameraBlob));
    graphicBuffer->unlock();
    return OK;
}

status_t Camera3OutputStream::returnBufferCheckedLocked(
        const camera_stream_buffer &buffer,
        nsecs_t timestamp,
        nsecs_t readoutTimestamp,
        [[maybe_unused]] bool output,
        int32_t transform,
        const std::vector<size_t>& surface_ids,
        /*out*/
        sp<Fence> *releaseFenceOut) {

    ALOG_ASSERT(output, "Expected output to be true");

    status_t res;

    // Fence management - always honor release fence from HAL
    sp<Fence> releaseFence = new Fence(buffer.release_fence);
    int anwReleaseFence = releaseFence->dup();

    /**
     * Release the lock briefly to avoid deadlock with
     * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
     * thread will go into StreamingProcessor::onFrameAvailable) during
     * queueBuffer
     */
    sp<ANativeWindow> currentConsumer = mConsumer;
    StreamState state = mState;
    mLock.unlock();

    ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
    bool bufferDeferred = false;
    /**
     * Return buffer back to ANativeWindow
     */
    if (buffer.status == CAMERA_BUFFER_STATUS_ERROR || mDropBuffers || timestamp == 0) {
        // Cancel buffer
        if (mDropBuffers) {
            ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
        } else if (buffer.status == CAMERA_BUFFER_STATUS_ERROR) {
            ALOGV("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
        } else {
            ALOGE("%s: Stream %d: timestamp shouldn't be 0", __FUNCTION__, mId);
        }

        res = currentConsumer->cancelBuffer(currentConsumer.get(),
                anwBuffer,
                anwReleaseFence);
        if (shouldLogError(res, state)) {
            ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                    " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }

        notifyBufferReleased(anwBuffer);
        if (mUseBufferManager) {
            // Return this buffer back to buffer manager.
            mBufferProducerListener->onBufferReleased();
        }
    } else {
        if (mTraceFirstBuffer && (stream_type == CAMERA_STREAM_OUTPUT)) {
            {
                char traceLog[48];
                snprintf(traceLog, sizeof(traceLog), "Stream %d: first full buffer\n", mId);
                ATRACE_NAME(traceLog);
            }
            mTraceFirstBuffer = false;
        }
        // Fix CameraBlob id type discrepancy between HIDL and AIDL, details : http://b/229688810
        if (getFormat() == HAL_PIXEL_FORMAT_BLOB && (getDataSpace() == HAL_DATASPACE_V0_JFIF ||
                    (getDataSpace() ==
                     static_cast<android_dataspace_t>(
                         aidl::android::hardware::graphics::common::Dataspace::JPEG_R)))) {
            if (mIPCTransport == IPCTransport::HIDL) {
                fixUpHidlJpegBlobHeader(anwBuffer, anwReleaseFence);
            }
            // If this is a JPEG output, and image dump mask is set, save image to
            // disk.
            if (mImageDumpMask) {
                dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
            }
        }

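        // Prefer the start-of-readout timestamp when readout-based timestamps
        // or display sync are in use; otherwise use the start-of-exposure
        // timestamp. Either way, shift by mTimestampOffset into the
        // consumer's time base.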
        nsecs_t captureTime = ((mUseReadoutTime || mSyncToDisplay) && readoutTimestamp != 0 ?
                readoutTimestamp : timestamp) - mTimestampOffset;
        if (mPreviewFrameSpacer != nullptr) {
            nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
                    - mTimestampOffset;
            res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
                    transform, anwBuffer, anwReleaseFence);
            if (res != OK) {
                ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
                return res;
            }
            bufferDeferred = true;
        } else {
            nsecs_t presentTime = mSyncToDisplay ?
                    syncTimestampToDisplayLocked(captureTime, releaseFence) : captureTime;

            setTransform(transform, true/*mayChangeMirror*/);
            res = native_window_set_buffers_timestamp(mConsumer.get(), presentTime);
            if (res != OK) {
                ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
                return res;
            }

            queueHDRMetadata(anwBuffer->handle, currentConsumer, dynamic_range_profile);

            res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
            if (shouldLogError(res, state)) {
                ALOGE("%s: Stream %d: Error queueing buffer to native window:"
                        " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
            }
        }
    }
    mLock.lock();

    if (bufferDeferred) {
        mCachedOutputBufferCount++;
    }

    // Once a valid buffer has been returned to the queue, we can no longer
    // dequeue all buffers for preallocation.
    if (buffer.status != CAMERA_BUFFER_STATUS_ERROR) {
        mStreamUnpreparable = true;
    }

    *releaseFenceOut = releaseFence;

    return res;
}

void Camera3OutputStream::dump(int fd, [[maybe_unused]] const Vector<String16> &args) const {
    String8 lines;
    lines.appendFormat("    Stream[%d]: Output\n", mId);
    lines.appendFormat("      Consumer name: %s\n", mConsumerName.string());
    write(fd, lines.string(), lines.size());

    Camera3IOStreamBase::dump(fd, args);

    mDequeueBufferLatency.dump(fd,
            "      DequeueBuffer latency histogram:");
}

status_t Camera3OutputStream::setTransform(int transform, bool mayChangeMirror) {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);
    if (mMirrorMode != OutputConfiguration::MIRROR_MODE_AUTO && mayChangeMirror) {
        // If the mirroring mode is not AUTO, do not allow transform update
        // which may change mirror.
        return OK;
    }

    return setTransformLocked(transform);
}

status_t Camera3OutputStream::setTransformLocked(int transform) {
    status_t res = OK;

    if (transform == -1) return res;

    if (mState == STATE_ERROR) {
        ALOGE("%s: Stream in error state", __FUNCTION__);
        return INVALID_OPERATION;
    }

    mTransform = transform;
    if (mState == STATE_CONFIGURED) {
        res = native_window_set_buffers_transform(mConsumer.get(),
                transform);
        if (res != OK) {
            ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                    __FUNCTION__, transform, strerror(-res), res);
        }
    }
    return res;
}

status_t Camera3OutputStream::configureQueueLocked() {
    status_t res;

    mTraceFirstBuffer = true;
    if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
        return res;
    }

    if ((res = configureConsumerQueueLocked(true /*allowPreviewRespace*/)) != OK) {
        return res;
    }

    // Set a dequeueBuffer/attachBuffer timeout if the consumer is not a hw composer or
    // hw texture. We need to skip these cases as a timeout would disable the
    // non-blocking (async) mode.
    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
        if (mUseBufferManager) {
            // When the buffer manager is handling buffers, there should be available
            // buffers in the buffer queue before we call into dequeueBuffer, because the
            // buffer manager is tracking free buffers.
            // There are, however, consumer-side features (e.g. ImageReader::discardFreeBuffers)
            // that can discard free buffers without notifying the buffer manager. We want the
            // timeout to happen immediately here so the buffer manager can update its internal
            // state and try to allocate a buffer instead of waiting.
            mConsumer->setDequeueTimeout(0);
        } else {
            mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
        }
    }

    return OK;
}

status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewRespace) {
    status_t res;

    mTraceFirstBuffer = true;

    ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");

    // Configure consumer-side ANativeWindow interface. The listener may be used
    // to notify the buffer manager (if one is used) of returned buffers.
    res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
            /*reportBufferRemoval*/true,
            /*listener*/mBufferProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mId);
        return res;
    }

    mConsumerName = mConsumer->getConsumerName();

    res = native_window_set_usage(mConsumer.get(), mUsage);
    if (res != OK) {
        ALOGE("%s: Unable to configure usage %" PRIu64 " for stream %d",
                __FUNCTION__, mUsage, mId);
        return res;
    }

    res = native_window_set_scaling_mode(mConsumer.get(),
            NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream scaling: %s (%d)",
                __FUNCTION__, strerror(-res), res);
        return res;
    }

    if (mMaxSize == 0) {
        // For buffers of known size
        res = native_window_set_buffers_dimensions(mConsumer.get(),
                camera_stream::width, camera_stream::height);
    } else {
        // For buffers with bounded size
        res = native_window_set_buffers_dimensions(mConsumer.get(),
                mMaxSize, 1);
    }
    if (res != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %d x %d (maxSize %zu) for stream %d",
                __FUNCTION__, camera_stream::width, camera_stream::height,
                mMaxSize, mId);
        return res;
    }
    res = native_window_set_buffers_format(mConsumer.get(),
            camera_stream::format);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream buffer format %#x for stream %d",
                __FUNCTION__, camera_stream::format, mId);
        return res;
    }

    res = native_window_set_buffers_data_space(mConsumer.get(),
            camera_stream::data_space);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream dataspace %#x for stream %d",
                __FUNCTION__, camera_stream::data_space, mId);
        return res;
    }

    int maxConsumerBuffers;
    res = static_cast<ANativeWindow*>(mConsumer.get())->query(
            mConsumer.get(),
            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
    if (res != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mId);
        return res;
    }

    ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
            maxConsumerBuffers, camera_stream::max_buffers);
    if (camera_stream::max_buffers == 0) {
        ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
                __FUNCTION__, camera_stream::max_buffers);
        return INVALID_OPERATION;
    }

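    // Size the queue to cover both sides: the consumer's minimum number of
    // undequeued buffers plus the HAL's maximum number of in-flight buffers.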
    mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;

    int timestampBase = getTimestampBase();
    bool isDefaultTimeBase = (timestampBase ==
            OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
    if (allowPreviewRespace) {
        bool forceChoreographer = (timestampBase ==
                OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED);
        bool defaultToChoreographer = (isDefaultTimeBase &&
                isConsumedByHWComposer());
        bool defaultToSpacer = (isDefaultTimeBase &&
                isConsumedByHWTexture() &&
                !isConsumedByCPU() &&
                !isVideoStream());
        if (forceChoreographer || defaultToChoreographer) {
            mSyncToDisplay = true;
            // For choreographer synced stream, extra buffers aren't kept by
            // camera service. So no need to update mMaxCachedBufferCount.
            mTotalBufferCount += kDisplaySyncExtraBuffer;
        } else if (defaultToSpacer) {
            mPreviewFrameSpacer = new PreviewFrameSpacer(this, mConsumer);
            // For preview frame spacer, the extra buffer is kept by camera
            // service. So update mMaxCachedBufferCount.
            mMaxCachedBufferCount = 1;
            mTotalBufferCount += mMaxCachedBufferCount;
            res = mPreviewFrameSpacer->run(String8::format("PreviewSpacer-%d", mId).string());
            if (res != OK) {
                ALOGE("%s: Unable to start preview spacer: %s (%d)", __FUNCTION__,
                        strerror(-res), res);
                return res;
            }
        }
    }
    mHandoutTotalBufferCount = 0;
    mFrameCount = 0;
    mLastTimestamp = 0;

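    // mTimestampOffset was passed in at construction (bootTime - monotonicTime
    // per the comment below); zero or negate it here so queued timestamps land
    // in the time base the consumer expects.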
    if (isDeviceTimeBaseRealtime()) {
        if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
            // Default time base, but not hardware composer or video encoder
            mTimestampOffset = 0;
        } else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
                timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR) {
            mTimestampOffset = 0;
        }
        // If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
        // timestamp offset as bootTime - monotonicTime.
    } else {
        if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME) {
            // Reverse offset for monotonicTime -> bootTime
            mTimestampOffset = -mTimestampOffset;
        } else {
            // If timestampBase is DEFAULT, MONOTONIC, SENSOR or
            // CHOREOGRAPHER_SYNCED, timestamp offset is 0.
            mTimestampOffset = 0;
        }
    }

    res = native_window_set_buffer_count(mConsumer.get(),
            mTotalBufferCount);
    if (res != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d",
                __FUNCTION__, mId);
        return res;
    }

    res = native_window_set_buffers_transform(mConsumer.get(),
            mTransform);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                __FUNCTION__, mTransform, strerror(-res), res);
        return res;
    }

    /**
     * Camera3 Buffer manager is only supported by HAL3.3 onwards, as older HALs require
     * buffers to be statically allocated for internal static buffer registration, while the
     * buffers provided by the buffer manager are dynamically allocated. Camera3Device only
     * sets mBufferManager if the device version is > HAL3.2, which guarantees that the
     * buffer manager setup is skipped in the code below for older HALs. Note that HAL3.2 is
     * also excluded here, as some HAL3.2 devices may not support dynamic buffer registration.
     * Camera3BufferManager also does not support display/texture streams, as they have their
     * own buffer management logic.
     */
    if (mBufferManager != 0 && mSetId > CAMERA3_STREAM_SET_ID_INVALID &&
            !(isConsumedByHWComposer() || isConsumedByHWTexture())) {
        uint64_t consumerUsage = 0;
        getEndpointUsage(&consumerUsage);
        uint32_t width = (mMaxSize == 0) ? getWidth() : mMaxSize;
        uint32_t height = (mMaxSize == 0) ? getHeight() : 1;
        StreamInfo streamInfo(
                getId(), getStreamSetId(), width, height, getFormat(), getDataSpace(),
                mUsage | consumerUsage, mTotalBufferCount,
                /*isConfigured*/true, isMultiResolution());
        wp<Camera3OutputStream> weakThis(this);
        res = mBufferManager->registerStream(weakThis,
                streamInfo);
        if (res == OK) {
            // Disable buffer allocation for this BufferQueue, buffer manager will take over
            // the buffer allocation responsibility.
            mConsumer->getIGraphicBufferProducer()->allowAllocation(false);
            mUseBufferManager = true;
        } else {
            ALOGE("%s: Unable to register stream %d to camera3 buffer manager, "
                  "(error %d %s), fall back to BufferQueue for buffer management!",
                  __FUNCTION__, mId, res, strerror(-res));
        }
    }

    return OK;
}

status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
    ATRACE_HFR_CALL();
    status_t res;

    if ((res = getBufferPreconditionCheckLocked()) != OK) {
        return res;
    }

    bool gotBufferFromManager = false;

    if (mUseBufferManager) {
        sp<GraphicBuffer> gb;
        res = mBufferManager->getBufferForStream(getId(), getStreamSetId(),
                isMultiResolution(), &gb, fenceFd);
        if (res == OK) {
            // Attach this buffer to the bufferQueue: the buffer will be in dequeued state
            // after a successful return.
            *anb = gb.get();
            res = mConsumer->attachBuffer(*anb);
            if (shouldLogError(res, mState)) {
                ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
            }
            if (res != OK) {
                checkRetAndSetAbandonedLocked(res);
                return res;
            }
            gotBufferFromManager = true;
            ALOGV("Stream %d: Attached new buffer", getId());
        } else if (res == ALREADY_EXISTS) {
            // Have sufficient free buffers already attached, can just
            // dequeue from buffer queue
            ALOGV("Stream %d: Reusing attached buffer", getId());
            gotBufferFromManager = false;
        } else if (res != OK) {
            ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
                    __FUNCTION__, mId, strerror(-res), res);
            return res;
        }
    }
    if (!gotBufferFromManager) {
        /**
         * Release the lock briefly to avoid deadlock in the scenario below:
         * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
         *           This thread acquires the StreamingProcessor lock and tries to lock the
         *           Camera3Stream lock.
         * Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
         *           This thread acquires the Camera3Stream lock and the bufferQueue lock, and
         *           tries to lock the StreamingProcessor lock.
         * Thread 3: Camera3Stream::getBuffer(). This thread acquires the Camera3Stream lock
         *           and tries to lock the bufferQueue lock.
         * This creates a circular locking dependency.
         */
        sp<Surface> consumer = mConsumer;
        size_t remainingBuffers = (mState == STATE_PREPARING ? mTotalBufferCount :
                                   camera_stream::max_buffers) - mHandoutTotalBufferCount;
        mLock.unlock();

        nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);

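        // batchSize > 1 means buffers are prefetched in batches: refill
        // mBatchedBuffers via dequeueBuffers() when it is empty, then hand
        // buffers out one at a time from the cached batch.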
        size_t batchSize = mBatchSize.load();
        if (batchSize == 1) {
            sp<ANativeWindow> anw = consumer;
            res = anw->dequeueBuffer(anw.get(), anb, fenceFd);
        } else {
            std::unique_lock<std::mutex> batchLock(mBatchLock);
            res = OK;
            if (mBatchedBuffers.size() == 0) {
                if (remainingBuffers == 0) {
                    ALOGE("%s: cannot get buffer while all buffers are handed out", __FUNCTION__);
                    return INVALID_OPERATION;
                }
                if (batchSize > remainingBuffers) {
                    batchSize = remainingBuffers;
                }
                batchLock.unlock();
                // Refill batched buffers
                std::vector<Surface::BatchBuffer> batchedBuffers;
                batchedBuffers.resize(batchSize);
                res = consumer->dequeueBuffers(&batchedBuffers);
                batchLock.lock();
                if (res != OK) {
                    ALOGE("%s: batch dequeueBuffers call failed! %s (%d)",
                            __FUNCTION__, strerror(-res), res);
                } else {
                    mBatchedBuffers = std::move(batchedBuffers);
                }
            }

            if (res == OK) {
                // Dispatch batch buffers
                *anb = mBatchedBuffers.back().buffer;
                *fenceFd = mBatchedBuffers.back().fenceFd;
                mBatchedBuffers.pop_back();
            }
        }

        nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
        mDequeueBufferLatency.add(dequeueStart, dequeueEnd);

        mLock.lock();

        if (mUseBufferManager && res == TIMED_OUT) {
            checkRemovedBuffersLocked();

            sp<GraphicBuffer> gb;
            res = mBufferManager->getBufferForStream(
                    getId(), getStreamSetId(), isMultiResolution(),
                    &gb, fenceFd, /*noFreeBuffer*/true);

            if (res == OK) {
                // Attach this buffer to the bufferQueue: the buffer will be in dequeued state
                // after a successful return.
                *anb = gb.get();
                res = mConsumer->attachBuffer(*anb);
                gotBufferFromManager = true;
                ALOGV("Stream %d: Attached new buffer", getId());

                if (res != OK) {
                    if (shouldLogError(res, mState)) {
                        ALOGE("%s: Stream %d: Can't attach the output buffer to this surface:"
                                " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
                    }
                    checkRetAndSetAbandonedLocked(res);
                    return res;
                }
            } else {
                ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager:"
                        " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
                return res;
            }
        } else if (res != OK) {
            if (shouldLogError(res, mState)) {
                ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
            }
            checkRetAndSetAbandonedLocked(res);
            return res;
        }
    }

    if (res == OK) {
        checkRemovedBuffersLocked();
    }

    return res;
}

void Camera3OutputStream::checkRemovedBuffersLocked(bool notifyBufferManager) {
    std::vector<sp<GraphicBuffer>> removedBuffers;
    status_t res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
    if (res == OK) {
        onBuffersRemovedLocked(removedBuffers);

        if (notifyBufferManager && mUseBufferManager && removedBuffers.size() > 0) {
            mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), isMultiResolution(),
                    removedBuffers.size());
        }
    }
}

void Camera3OutputStream::checkRetAndSetAbandonedLocked(status_t res) {
    // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is
    // STATE_PREPARING, let prepareNextBuffer handle the error.)
    if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
        mState = STATE_ABANDONED;
    }
}

bool Camera3OutputStream::shouldLogError(status_t res, StreamState state) {
    if (res == OK) {
        return false;
    }
    if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) {
        return false;
    }
    return true;
}

void Camera3OutputStream::onCachedBufferQueued() {
    Mutex::Autolock l(mLock);
    mCachedOutputBufferCount--;
    // Signal whoever is waiting for the buffer to be returned to the buffer
    // queue.
    mOutputBufferReturnedSignal.signal();
}

status_t Camera3OutputStream::disconnectLocked() {
    status_t res;

    if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) {
        return res;
    }

    // If stream configuration was not finished (we can only be in STATE_IN_CONFIG or
    // STATE_CONSTRUCTED), there is no need to change the stream state; return OK.
    if (mConsumer == nullptr) {
        return OK;
    }

    returnPrefetchedBuffersLocked();

    if (mPreviewFrameSpacer != nullptr) {
        mPreviewFrameSpacer->requestExit();
    }

    ALOGV("%s: disconnecting stream %d from native window", __FUNCTION__, getId());

    res = native_window_api_disconnect(mConsumer.get(),
            NATIVE_WINDOW_API_CAMERA);
    /**
     * This is not an error. If the client's calling process dies, the window will
     * also die and all calls to it will return DEAD_OBJECT, thus it's already
     * "disconnected"
     */
    if (res == DEAD_OBJECT) {
        ALOGW("%s: While disconnecting stream %d from native window, the"
                " native window died from under us", __FUNCTION__, mId);
    } else if (res != OK) {
        ALOGE("%s: Unable to disconnect stream %d from native window "
              "(error %d %s)",
              __FUNCTION__, mId, res, strerror(-res));
        mState = STATE_ERROR;
        return res;
    }

    // Since the device is already idle, there are no getBuffer calls to the buffer
    // manager, so unregistering the stream at this point is safe.
    if (mUseBufferManager) {
        res = mBufferManager->unregisterStream(getId(), getStreamSetId(), isMultiResolution());
        if (res != OK) {
            ALOGE("%s: Unable to unregister stream %d from buffer manager "
                  "(error %d %s)", __FUNCTION__, mId, res, strerror(-res));
            mState = STATE_ERROR;
            return res;
        }
        // Note that, to make the prepare/teardown case work, we must not call
        // mBufferManager.clear(), as the stream is still in a usable state after
        // this call.
        mUseBufferManager = false;
    }

    mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
                                           : STATE_CONSTRUCTED;

    mDequeueBufferLatency.log("Stream %d dequeueBuffer latency histogram", mId);
    mDequeueBufferLatency.reset();
    return OK;
}

status_t Camera3OutputStream::getEndpointUsage(uint64_t *usage) const {

    status_t res;

    if (mConsumer == nullptr) {
        // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
        *usage = mConsumerUsage;
        return OK;
    }

    res = getEndpointUsageForSurface(usage, mConsumer);

    return res;
}

void Camera3OutputStream::applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/) {
    if (consumerUsage == nullptr) {
        return;
    }

    // If an opaque output stream's endpoint is ImageReader, add
    // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used
    // for the ZSL use case.
    // Assume it's for ImageReader if the consumer usage doesn't have any of these bits set:
    //     1. GRALLOC_USAGE_HW_TEXTURE
    //     2. GRALLOC_USAGE_HW_RENDER
    //     3. GRALLOC_USAGE_HW_COMPOSER
    //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
            (*consumerUsage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
        *consumerUsage |= GRALLOC_USAGE_HW_CAMERA_ZSL;
    }
}

status_t Camera3OutputStream::getEndpointUsageForSurface(uint64_t *usage,
        const sp<Surface>& surface) const {
    status_t res;
    uint64_t u = 0;

    res = native_window_get_consumer_usage(static_cast<ANativeWindow*>(surface.get()), &u);
    applyZSLUsageQuirk(camera_stream::format, &u);
    *usage = u;
    return res;
}

bool Camera3OutputStream::isVideoStream() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0;
}

status_t Camera3OutputStream::setBufferManager(sp<Camera3BufferManager> bufferManager) {
    Mutex::Autolock l(mLock);
    if (mState != STATE_CONSTRUCTED) {
        ALOGE("%s: this method can only be called when stream in CONSTRUCTED state.",
                __FUNCTION__);
        return INVALID_OPERATION;
    }
    mBufferManager = bufferManager;

    return OK;
}

status_t Camera3OutputStream::updateStream(const std::vector<sp<Surface>> &/*outputSurfaces*/,
        const std::vector<OutputStreamInfo> &/*outputInfo*/,
        const std::vector<size_t> &/*removedSurfaceIds*/,
        KeyedVector<sp<Surface>, size_t> * /*outputMap*/) {
    ALOGE("%s: this method is not supported!", __FUNCTION__);
    return INVALID_OPERATION;
}

void Camera3OutputStream::BufferProducerListener::onBufferReleased() {
    sp<Camera3OutputStream> stream = mParent.promote();
    if (stream == nullptr) {
        ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
        return;
    }

    Mutex::Autolock l(stream->mLock);
    if (!(stream->mUseBufferManager)) {
        return;
    }

    ALOGV("Stream %d: Buffer released", stream->getId());
    bool shouldFreeBuffer = false;
    status_t res = stream->mBufferManager->onBufferReleased(
        stream->getId(), stream->getStreamSetId(), stream->isMultiResolution(),
        &shouldFreeBuffer);
    if (res != OK) {
        ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
                strerror(-res), res);
        stream->mState = STATE_ERROR;
    }

    if (shouldFreeBuffer) {
        sp<GraphicBuffer> buffer;
        // Detach and free a buffer (when buffer goes out of scope)
        stream->detachBufferLocked(&buffer, /*fenceFd*/ nullptr);
        if (buffer.get() != nullptr) {
            stream->mBufferManager->notifyBufferRemoved(
                    stream->getId(), stream->getStreamSetId(), stream->isMultiResolution());
        }
    }
}

void Camera3OutputStream::BufferProducerListener::onBuffersDiscarded(
        const std::vector<sp<GraphicBuffer>>& buffers) {
    sp<Camera3OutputStream> stream = mParent.promote();
    if (stream == nullptr) {
        ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
        return;
    }

    if (buffers.size() > 0) {
        Mutex::Autolock l(stream->mLock);
        stream->onBuffersRemovedLocked(buffers);
        if (stream->mUseBufferManager) {
            stream->mBufferManager->onBuffersRemoved(stream->getId(),
                    stream->getStreamSetId(), stream->isMultiResolution(), buffers.size());
        }
        ALOGV("Stream %d: %zu Buffers discarded.", stream->getId(), buffers.size());
    }
}

void Camera3OutputStream::onBuffersRemovedLocked(
        const std::vector<sp<GraphicBuffer>>& removedBuffers) {
    sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
    if (callback != nullptr) {
        for (const auto& gb : removedBuffers) {
            callback->onBufferFreed(mId, gb->handle);
        }
    }
}

status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
    Mutex::Autolock l(mLock);
    return detachBufferLocked(buffer, fenceFd);
}

status_t Camera3OutputStream::detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd) {
    ALOGV("Stream %d: detachBuffer", getId());
    if (buffer == nullptr) {
        return BAD_VALUE;
    }

    sp<Fence> fence;
    status_t res = mConsumer->detachNextBuffer(buffer, &fence);
    if (res == NO_MEMORY) {
        // This may rarely happen, and indicates that the released buffer was freed by another
        // call (e.g., attachBuffer, dequeueBuffer etc.) before reaching here. We should notify
        // the buffer manager that this buffer has been freed. It's not fatal, but should be
        // avoided, therefore log a warning.
        *buffer = 0;
        ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
    } else if (res != OK) {
        // Treat other errors as abandonment
        if (shouldLogError(res, mState)) {
            ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        }
        mState = STATE_ABANDONED;
        return res;
    }

    if (fenceFd != nullptr) {
        if (fence != 0 && fence->isValid()) {
            *fenceFd = fence->dup();
        } else {
            *fenceFd = -1;
        }
    }

    // Here we assume detachBuffer is called by the buffer manager, so it doesn't need to be
    // notified
    checkRemovedBuffersLocked(/*notifyBufferManager*/false);
    return res;
}

status_t Camera3OutputStream::dropBuffers(bool dropping) {
    Mutex::Autolock l(mLock);
    mDropBuffers = dropping;
    return OK;
}

const String8& Camera3OutputStream::getPhysicalCameraId() const {
    Mutex::Autolock l(mLock);
    return physicalCameraId();
}

status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
    return OK;
}

bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
    Mutex::Autolock l(mLock);

    if (surface_id != 0) {
        ALOGE("%s: surface_id %zu for Camera3OutputStream should be 0!", __FUNCTION__, surface_id);
    }
    return mConsumer == nullptr;
}

status_t Camera3OutputStream::setConsumers(const std::vector<sp<Surface>>& consumers) {
    Mutex::Autolock l(mLock);
    if (consumers.size() != 1) {
        ALOGE("%s: it's illegal to set %zu consumer surfaces!",
              __FUNCTION__, consumers.size());
        return INVALID_OPERATION;
    }
    if (consumers[0] == nullptr) {
        ALOGE("%s: it's illegal to set null consumer surface!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (mConsumer != nullptr) {
        ALOGE("%s: consumer surface was already set!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    mConsumer = consumers[0];
    return OK;
}

bool Camera3OutputStream::isConsumedByHWComposer() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_COMPOSER) != 0;
}

bool Camera3OutputStream::isConsumedByHWTexture() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_TEXTURE) != 0;
}

bool Camera3OutputStream::isConsumedByCPU() const {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_SW_READ_MASK) != 0;
}

void Camera3OutputStream::dumpImageToDisk(nsecs_t timestamp,
        ANativeWindowBuffer* anwBuffer, int fence) {
    // Derive the output file name
    std::string fileExtension = "jpg";
    char imageFileName[64];
    time_t now = time(0);
    tm *localTime = localtime(&now);
    snprintf(imageFileName, sizeof(imageFileName), "IMG_%4d%02d%02d_%02d%02d%02d_%" PRId64 ".%s",
            1900 + localTime->tm_year, localTime->tm_mon + 1, localTime->tm_mday,
            localTime->tm_hour, localTime->tm_min, localTime->tm_sec,
            timestamp, fileExtension.c_str());

    // Lock the image for CPU read
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
    void* mapped = nullptr;
    base::unique_fd fenceFd(dup(fence));
    status_t res = graphicBuffer->lockAsync(GraphicBuffer::USAGE_SW_READ_OFTEN, &mapped,
            fenceFd.release());
    if (res != OK) {
        ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
        return;
    }

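    // findJpegSize parses the JPEG stream to locate where the compressed data
    // actually ends; fall back to the full buffer size if it cannot.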
    // Figure out actual file size
    auto actualJpegSize = android::camera2::JpegProcessor::findJpegSize((uint8_t*)mapped, mMaxSize);
    if (actualJpegSize == 0) {
        actualJpegSize = mMaxSize;
    }

    // Output image data to file
    std::string filePath = "/data/misc/cameraserver/";
    filePath += imageFileName;
    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
    if (!imageFile.is_open()) {
        ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
        graphicBuffer->unlock();
        return;
    }
    imageFile.write((const char*)mapped, actualJpegSize);

    graphicBuffer->unlock();
}

status_t Camera3OutputStream::setBatchSize(size_t batchSize) {
    Mutex::Autolock l(mLock);
    if (batchSize == 0) {
        ALOGE("%s: invalid batch size 0", __FUNCTION__);
        return BAD_VALUE;
    }

    if (mUseBufferManager) {
        ALOGE("%s: batch operation is not supported with buffer manager", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (!isVideoStream()) {
        ALOGE("%s: batch operation is not supported with non-video stream", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (camera_stream::max_buffers < batchSize) {
        ALOGW("%s: batch size is capped by max_buffers %d", __FUNCTION__,
                camera_stream::max_buffers);
        batchSize = camera_stream::max_buffers;
    }

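    // Atomically allow only the 1 -> batchSize transition; changing between
    // two non-default batch sizes on the fly is not supported.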
    size_t defaultBatchSize = 1;
    if (!mBatchSize.compare_exchange_strong(defaultBatchSize, batchSize)) {
        ALOGE("%s: change batch size from %zu to %zu dynamically is not supported",
                __FUNCTION__, defaultBatchSize, batchSize);
        return INVALID_OPERATION;
    }

    return OK;
}

void Camera3OutputStream::onMinDurationChanged(nsecs_t duration, bool fixedFps) {
    Mutex::Autolock l(mLock);
    mMinExpectedDuration = duration;
    mFixedFps = fixedFps;
}

void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) {
    Mutex::Autolock l(mLock);
    camera_stream::use_case = streamUseCase;
}

void Camera3OutputStream::returnPrefetchedBuffersLocked() {
    std::vector<Surface::BatchBuffer> batchedBuffers;

    {
        std::lock_guard<std::mutex> batchLock(mBatchLock);
        if (mBatchedBuffers.size() != 0) {
            ALOGW("%s: %zu extra prefetched buffers detected. Returning",
                    __FUNCTION__, mBatchedBuffers.size());
            batchedBuffers = std::move(mBatchedBuffers);
        }
    }

    if (batchedBuffers.size() > 0) {
        mConsumer->cancelBuffers(batchedBuffers);
    }
}

nsecs_t Camera3OutputStream::syncTimestampToDisplayLocked(nsecs_t t, sp<Fence> releaseFence) {
    nsecs_t currentTime = systemTime();
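    // For variable frame rate streams, present as soon as possible: use the
    // capture time directly and just record it for the next frame.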
    if (!mFixedFps) {
        mLastCaptureTime = t;
        mLastPresentTime = currentTime;
        return t;
    }

    ParcelableVsyncEventData parcelableVsyncEventData;
    auto res = mDisplayEventReceiver.getLatestVsyncEventData(&parcelableVsyncEventData);
    if (res != OK) {
        ALOGE("%s: Stream %d: Error getting latest vsync event data: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        mLastCaptureTime = t;
        mLastPresentTime = currentTime;
        return t;
    }

    const VsyncEventData& vsyncEventData = parcelableVsyncEventData.vsync;
    nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;

    // Find the best presentation time without worrying about the previous frame's
    // presentation time if the capture interval is more than kSpacingResetIntervalNs.
    //
    // When frames are more than 50 ms apart (3 vsyncs for a 60hz refresh rate),
    // there is little risk in starting over and finding the earliest vsync to latch onto.
    // - Update the captureToPresentTime offset to be used for later frames.
    // - Example use cases:
    //   - when the frame rate drops below 20 fps, or
    //   - a new streaming session starts (stopPreview followed by
    //     startPreview)
    //
    nsecs_t captureInterval = t - mLastCaptureTime;
    if (captureInterval > kSpacingResetIntervalNs) {
        for (size_t i = 0; i < vsyncEventData.frameTimelinesLength; i++) {
            const auto& timeline = vsyncEventData.frameTimelines[i];
            if (timeline.deadlineTimestamp >= currentTime &&
                    timeline.expectedPresentationTime > minPresentT) {
                nsecs_t presentT = vsyncEventData.frameTimelines[i].expectedPresentationTime;
                mCaptureToPresentOffset = presentT - t;
                mLastCaptureTime = t;
                mLastPresentTime = presentT;

                // If releaseFence is available, store the fence to check signal
                // time later.
                mRefVsyncData = vsyncEventData;
                mReferenceCaptureTime = t;
                mReferenceArrivalTime = currentTime;
                if (releaseFence->isValid()) {
                    mReferenceFrameFence = new Fence(releaseFence->dup());
                } else {
                    mFenceSignalOffset = 0;
                }

                // Move the expected presentation time back by 1/3 of the frame interval to
                // mitigate time drift. Due to drift, if we used the expected presentation
                // time directly, often two expected presentation times would fall into the
                // same VSYNC interval.
                return presentT - vsyncEventData.frameInterval/3;
            }
        }
    }


    // If there is a reference frame release fence, get the signal time and
    // update the captureToPresentOffset.
    if (mReferenceFrameFence != nullptr) {
        mFenceSignalOffset = 0;
        nsecs_t signalTime = mReferenceFrameFence->getSignalTime();
        // Now that the fence has signaled, recalculate the offsets based on
        // the timeline which was actually latched.
        if (signalTime != INT64_MAX) {
            for (size_t i = 0; i < mRefVsyncData.frameTimelinesLength; i++) {
                const auto& timeline = mRefVsyncData.frameTimelines[i];
                if (timeline.deadlineTimestamp >= signalTime) {
                    nsecs_t originalOffset = mCaptureToPresentOffset;
                    mCaptureToPresentOffset = timeline.expectedPresentationTime
                            - mReferenceCaptureTime;
                    mLastPresentTime = timeline.expectedPresentationTime;
                    mFenceSignalOffset = signalTime > mReferenceArrivalTime ?
                            signalTime - mReferenceArrivalTime : 0;

                    ALOGV("%s: Last deadline %" PRId64 " signalTime %" PRId64
                            " original offset %" PRId64 " new offset %" PRId64
                            " fence signal offset %" PRId64, __FUNCTION__,
                            timeline.deadlineTimestamp, signalTime, originalOffset,
                            mCaptureToPresentOffset, mFenceSignalOffset);
                    break;
                }
            }
            mReferenceFrameFence.clear();
        }
    }

    nsecs_t idealPresentT = t + mCaptureToPresentOffset;
    nsecs_t expectedPresentT = mLastPresentTime;
    nsecs_t minDiff = INT64_MAX;

    // In the fixed FPS case, when frame durations are close to multiples of
    // the display refresh interval, derive the minimum interval between
    // presentation times from the minimal expected duration. The minimum
    // number of Vsyncs is:
    // - 0 if minFrameDuration in (0, 1.5] * vSyncInterval,
    // - 1 if minFrameDuration in (1.5, 2.5] * vSyncInterval,
    // - and so on.
    //
    // This spaces out the displaying of the frames so that the frame
    // presentations are roughly in sync with the frame captures.
    int minVsyncs = (mMinExpectedDuration - vsyncEventData.frameInterval / 2) /
            vsyncEventData.frameInterval;
    if (minVsyncs < 0) minVsyncs = 0;
    nsecs_t minInterval = minVsyncs * vsyncEventData.frameInterval;
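    // Worked example (values are illustrative): a 30fps stream on a 60hz
    // display has mMinExpectedDuration ~= 33.3ms and frameInterval ~= 16.7ms,
    // so minVsyncs = (int)((33.3 - 8.3) / 16.7) = 1 and minInterval ~= 16.7ms;
    // consecutive presentations are spaced at least one vsync apart.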

    // In the fixed FPS case, if the frame duration deviates from multiples of
    // the display refresh interval, find the closest Vsync without requiring a
    // minimum number of Vsyncs.
    //
    // Example: (24fps camera, 60hz refresh):
    //   capture readout:  |  t1  |  t1  | .. |  t1  | .. |  t1  | .. |  t1  |
    //   display VSYNC:      | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
    //   |  : 1 frame
    //   t1 : 41.67ms
    //   t2 : 16.67ms
    //   t1/t2 = 2.5
    //
    // 24fps is a commonly used video frame rate. Because the capture
    // interval is 2.5 times the display refresh interval, the minVsyncs
    // calculation falls right at the boundary condition. In this case,
    // we should fall back to the basic logic of finding the closest vsync
    // timestamp without worrying about minVsyncs.
    float captureToVsyncIntervalRatio = 1.0f * mMinExpectedDuration / vsyncEventData.frameInterval;
    float ratioDeviation = std::fabs(
            captureToVsyncIntervalRatio - std::roundf(captureToVsyncIntervalRatio));
    bool captureDeviateFromVsync = ratioDeviation >= kMaxIntervalRatioDeviation;
    bool cameraDisplayInSync = (mFixedFps && !captureDeviateFromVsync);
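    // For the 24fps example above, captureToVsyncIntervalRatio ~= 41.67/16.67
    // = 2.5, so ratioDeviation ~= 0.5, far above the ~5% threshold described
    // below; such a stream is treated as deviating from vsync even though
    // mFixedFps is true.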

    // Find the best timestamp in the vsync timelines:
    // - Only use at most kMaxTimelines timelines to avoid long latency, plus
    //   an extra timeline if a display fence is in use.
    // - The chosen timeline is the one closest to the ideal presentation time
    //   whose deadline timestamp is greater than the current time (pushed out
    //   by mFenceSignalOffset when the reference fence signaled late), and:
    //   - For fixed FPS, if the capture interval doesn't deviate too much from
    //     the refresh interval, the candidate presentation time must be at
    //     least minInterval in the future compared to the last presentation
    //     time.
    //   - For variable FPS, or if the capture interval deviates from the
    //     refresh interval by more than 5%, pick the timeline closest to
    //     idealPresentT (captureTime + captureToPresentOffset) that is at
    //     least half a vsync after the last presentation time.
    int fenceAdjustment = (mFenceSignalOffset > 0) ? 1 : 0;
    int maxTimelines = std::min(kMaxTimelines + fenceAdjustment,
            (int)vsyncEventData.frameTimelinesLength);
    float biasForShortDelay = 1.0f;
    for (int i = 0; i < maxTimelines; i++) {
        const auto& vsyncTime = vsyncEventData.frameTimelines[i];
        if (minVsyncs > 0 && maxTimelines > 1) {
            // Bias towards using a smaller timeline index:
            //   i = 0:              bias = 1
            //   i = maxTimelines-1: bias = -1
            // (The maxTimelines > 1 check guards the division below against a
            // 0/0 NaN when only a single timeline is reported.)
            biasForShortDelay = 1.0f - 2.0f * i / (maxTimelines - 1);
        }
        if (std::abs(vsyncTime.expectedPresentationTime - idealPresentT) < minDiff &&
                vsyncTime.deadlineTimestamp >= currentTime + mFenceSignalOffset &&
                ((!cameraDisplayInSync && vsyncTime.expectedPresentationTime > minPresentT) ||
                 (cameraDisplayInSync && vsyncTime.expectedPresentationTime >
                        mLastPresentTime + minInterval +
                        static_cast<nsecs_t>(biasForShortDelay * kTimelineThresholdNs)))) {
            expectedPresentT = vsyncTime.expectedPresentationTime;
            minDiff = std::abs(vsyncTime.expectedPresentationTime - idealPresentT);
        }
    }

    if (expectedPresentT == mLastPresentTime && expectedPresentT <
            vsyncEventData.frameTimelines[maxTimelines - 1].expectedPresentationTime) {
        // Couldn't find a reasonable presentation time. Using the last frame's
        // presentation time would cause a frame drop. The best option now
        // is to use the next VSync as long as the last presentation time
        // doesn't already have the maximum latency, in which case dropping the
        // buffer is more desirable than increasing latency.
        //
        // Example: (60fps camera, 59.9hz refresh):
        //   capture readout:  |  t1  |  t1  | .. |  t1  | .. |  t1  | .. |  t1  |
        //                       \     \     \     \     \     \     \     \
        //   queue to BQ:        |     |     |     |     |     |     |     |
        //                       \     \     \     \     \     \     \     \
        //   display VSYNC:       | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
        //
        //   |  : 1 frame
        //   t1 : 16.67ms
        //   t2 : 16.69ms
        //
        // It takes 833 frames (~16.67 / (16.69 - 16.67)) for the capture
        // readout count and the display VSYNC count to be off by 1.
        // - At frames [0, 832], presentationTime is set to timeline[0]
        // - At frames [833, 833*2-1], presentationTime is set to timeline[1]
        // - At frames [833*2, 833*3-1], presentationTime is set to timeline[2]
        // - At frame 833*3, no presentation time is found because we only
        //   search for timeline[0..2].
        // - Dropping one buffer is better than further extending the
        //   presentation time.
        //
        // However, if frame 833*2 arrives 16.67ms early (right after frame
        // 833*2-1), no presentation time can be found because
        // getLatestVsyncEventData is called early. In that case, it's better to
        // set the presentation time by offsetting the last presentation time.
        expectedPresentT += vsyncEventData.frameInterval;
    }

    mLastCaptureTime = t;
    mLastPresentTime = expectedPresentT;

    // Move the expected presentation time back by 1/3 of the frame interval to
    // mitigate time drift. Due to time drift, if we directly use the
    // expected presentation time, two consecutive expected presentation times
    // often fall into the same VSYNC interval.
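    // (At a 60hz refresh rate this shifts the returned time by roughly 5.6ms,
    // well under one vsync, so the frame still targets the chosen interval.)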
    return expectedPresentT - vsyncEventData.frameInterval/3;
}

bool Camera3OutputStream::shouldLogError(status_t res) {
    Mutex::Autolock l(mLock);
    return shouldLogError(res, mState);
}

} // namespace camera3

} // namespace android