1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Camera3-DepthCompositeStream"
18 #define ATRACE_TAG ATRACE_TAG_CAMERA
19 //#define LOG_NDEBUG 0
20
21 #include <aidl/android/hardware/camera/device/CameraBlob.h>
22 #include <aidl/android/hardware/camera/device/CameraBlobId.h>
23
24 #include "api1/client2/JpegProcessor.h"
25 #include "common/CameraProviderManager.h"
26 #include "utils/SessionConfigurationUtils.h"
27 #include <gui/Surface.h>
28 #include <utils/Log.h>
29 #include <utils/Trace.h>
30
31 #include "DepthCompositeStream.h"
32
33 namespace android {
34 namespace camera3 {
35
36 using aidl::android::hardware::camera::device::CameraBlob;
37 using aidl::android::hardware::camera::device::CameraBlobId;
38
// Composite stream that pairs an internal blob (jpeg) stream with an internal
// depth map stream and emits combined dynamic depth photos on a single client
// surface. The constructor caches all static camera characteristics that are
// later needed while composing the depth photo.
DepthCompositeStream::DepthCompositeStream(sp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegBufferSize(-1),
        mUHRMaxJpegBufferSize(-1),
        mIsLogicalCamera(false) {
    if (device != nullptr) {
        CameraMetadata staticInfo = device->info();
        // Maximum jpeg buffer size advertised by the HAL, if present.
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegBufferSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        // Largest jpeg resolutions for the ultra-high-resolution and default
        // sensor pixel modes; used when sizing the composite output buffer.
        mUHRMaxJpegSize =
                SessionConfigurationUtils::getMaxJpegResolution(staticInfo,
                        /*ultraHighResolution*/true);
        mDefaultMaxJpegSize =
                SessionConfigurationUtils::getMaxJpegResolution(staticInfo,
                        /*isUltraHighResolution*/false);

        mUHRMaxJpegBufferSize =
            SessionConfigurationUtils::getUHRMaxJpegBufferSize(mUHRMaxJpegSize, mDefaultMaxJpegSize,
                    mMaxJpegBufferSize);

        // Intrinsic calibration layout:
        // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mIntrinsicCalibration.reserve(5);
            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        // Lens distortion correction coefficients [kappa_1 .. kappa_5].
        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        // Remember whether this is a logical multi-camera; the flag is passed
        // into the depth photo processor (see processInputFrame).
        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        // Cache the supported depth map sizes for both sensor pixel modes.
        getSupportedDepthSizes(staticInfo, /*maxResolution*/false, &mSupportedDepthSizes);
        if (SessionConfigurationUtils::isUltraHighResolutionSensor(staticInfo)) {
            getSupportedDepthSizes(staticInfo, true, &mSupportedDepthSizesMaximumResolution);
        }
    }
}
106
~DepthCompositeStream()107 DepthCompositeStream::~DepthCompositeStream() {
108 mBlobConsumer.clear(),
109 mBlobSurface.clear(),
110 mBlobStreamId = -1;
111 mBlobSurfaceId = -1;
112 mDepthConsumer.clear();
113 mDepthSurface.clear();
114 mDepthConsumer = nullptr;
115 mDepthSurface = nullptr;
116 }
117
// Drains all producer-side queues (new jpeg buffers, new depth buffers,
// capture results, frame numbers and error notifications) and folds them into
// 'mPendingInputFrames', keyed by buffer timestamp. Caller must hold 'mMutex'.
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    // Pair newly arrived jpeg buffers with pending frames. At most one blob
    // buffer is kept locked at a time ('mBlobBufferAcquired' guards the single
    // CpuConsumer lock slot configured in createInternalStreams).
    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        // Buffers that belong to frames already flagged as failed are returned
        // immediately; otherwise the buffer stays locked until processing.
        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    // Same pairing logic for the depth map buffers.
    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    // Attach capture results (frame number + metadata) to their frames.
    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    // Record the frame number for frames mapped by timestamp.
    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    // Propagate error notifications to matching pending frames. Unmatched
    // entries are kept (with a warning) and re-checked on the next call.
    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}
216
getNextReadyInputLocked(int64_t * currentTs)217 bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
218 if (currentTs == nullptr) {
219 return false;
220 }
221
222 bool newInputAvailable = false;
223 for (const auto& it : mPendingInputFrames) {
224 if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
225 (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
226 *currentTs = it.first;
227 newInputAvailable = true;
228 }
229 }
230
231 return newInputAvailable;
232 }
233
getNextFailingInputLocked(int64_t * currentTs)234 int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
235 int64_t ret = -1;
236 if (currentTs == nullptr) {
237 return ret;
238 }
239
240 for (const auto& it : mPendingInputFrames) {
241 if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
242 *currentTs = it.first;
243 ret = it.second.frameNumber;
244 }
245 }
246
247 return ret;
248 }
249
processInputFrame(nsecs_t ts,const InputFrame & inputFrame)250 status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
251 status_t res;
252 sp<ANativeWindow> outputANW = mOutputSurface;
253 ANativeWindowBuffer *anb;
254 int fenceFd;
255 void *dstBuffer;
256
257 auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
258 inputFrame.jpegBuffer.width);
259 if (jpegSize == 0) {
260 ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
261 jpegSize = inputFrame.jpegBuffer.width;
262 }
263
264 size_t maxDepthJpegBufferSize = 0;
265 if (mMaxJpegBufferSize > 0) {
266 // If this is an ultra high resolution sensor and the input frames size
267 // is > default res jpeg.
268 if (mUHRMaxJpegSize.width != 0 &&
269 inputFrame.jpegBuffer.width * inputFrame.jpegBuffer.height >
270 mDefaultMaxJpegSize.width * mDefaultMaxJpegSize.height) {
271 maxDepthJpegBufferSize = mUHRMaxJpegBufferSize;
272 } else {
273 maxDepthJpegBufferSize = mMaxJpegBufferSize;
274 }
275 } else {
276 maxDepthJpegBufferSize = std::max<size_t> (jpegSize,
277 inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
278 }
279
280 uint8_t jpegQuality = 100;
281 auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
282 if (entry.count > 0) {
283 jpegQuality = entry.data.u8[0];
284 }
285
286 // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
287 // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
288 // max jpeg size.
289 size_t finalJpegBufferSize = maxDepthJpegBufferSize * 3;
290
291 if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
292 != OK) {
293 ALOGE("%s: Unable to configure stream buffer dimensions"
294 " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
295 return res;
296 }
297
298 res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
299 if (res != OK) {
300 ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
301 res);
302 return res;
303 }
304
305 sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
306 GraphicBufferLocker gbLocker(gb);
307 res = gbLocker.lockAsync(&dstBuffer, fenceFd);
308 if (res != OK) {
309 ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
310 strerror(-res), res);
311 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
312 return res;
313 }
314
315 if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
316 ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
317 gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
318 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
319 return BAD_VALUE;
320 }
321
322 DepthPhotoInputFrame depthPhoto;
323 depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
324 depthPhoto.mMainJpegWidth = mBlobWidth;
325 depthPhoto.mMainJpegHeight = mBlobHeight;
326 depthPhoto.mMainJpegSize = jpegSize;
327 depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
328 depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
329 depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
330 depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
331 depthPhoto.mJpegQuality = jpegQuality;
332 depthPhoto.mIsLogical = mIsLogicalCamera;
333 depthPhoto.mMaxJpegSize = maxDepthJpegBufferSize;
334 // The camera intrinsic calibration layout is as follows:
335 // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
336 if (mIntrinsicCalibration.size() == 5) {
337 memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
338 sizeof(depthPhoto.mIntrinsicCalibration));
339 depthPhoto.mIsIntrinsicCalibrationValid = 1;
340 } else {
341 depthPhoto.mIsIntrinsicCalibrationValid = 0;
342 }
343 // The camera lens distortion contains the following lens correction coefficients.
344 // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
345 if (mLensDistortion.size() == 5) {
346 memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
347 sizeof(depthPhoto.mLensDistortion));
348 depthPhoto.mIsLensDistortionValid = 1;
349 } else {
350 depthPhoto.mIsLensDistortionValid = 0;
351 }
352 entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
353 if (entry.count > 0) {
354 // The camera jpeg orientation values must be within [0, 90, 180, 270].
355 switch (entry.data.i32[0]) {
356 case 0:
357 case 90:
358 case 180:
359 case 270:
360 depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
361 break;
362 default:
363 ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
364 __FUNCTION__, entry.data.i32[0]);
365 }
366 }
367
368 size_t actualJpegSize = 0;
369 res = processDepthPhotoFrame(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
370 if (res != 0) {
371 ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
372 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
373 return res;
374 }
375
376 size_t finalJpegSize = actualJpegSize + sizeof(CameraBlob);
377 if (finalJpegSize > finalJpegBufferSize) {
378 ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
379 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
380 return NO_MEMORY;
381 }
382
383 res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
384 if (res != OK) {
385 ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
386 getStreamId(), strerror(-res), res);
387 return res;
388 }
389
390 ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
391 uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
392 (gb->getWidth() - sizeof(CameraBlob));
393 CameraBlob *blob = reinterpret_cast<CameraBlob*> (header);
394 blob->blobId = CameraBlobId::JPEG;
395 blob->blobSizeBytes = actualJpegSize;
396 outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
397
398 return res;
399 }
400
releaseInputFrameLocked(InputFrame * inputFrame)401 void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
402 if (inputFrame == nullptr) {
403 return;
404 }
405
406 if (inputFrame->depthBuffer.data != nullptr) {
407 mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
408 inputFrame->depthBuffer.data = nullptr;
409 mDepthBufferAcquired = false;
410 }
411
412 if (inputFrame->jpegBuffer.data != nullptr) {
413 mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
414 inputFrame->jpegBuffer.data = nullptr;
415 mBlobBufferAcquired = false;
416 }
417
418 if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
419 //TODO: Figure out correct requestId
420 notifyError(inputFrame->frameNumber, -1 /*requestId*/);
421 inputFrame->errorNotified = true;
422 }
423 }
424
releaseInputFramesLocked(int64_t currentTs)425 void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
426 auto it = mPendingInputFrames.begin();
427 while (it != mPendingInputFrames.end()) {
428 if (it->first <= currentTs) {
429 releaseInputFrameLocked(&it->second);
430 it = mPendingInputFrames.erase(it);
431 } else {
432 it++;
433 }
434 }
435 }
436
// Body of the processing thread. Each iteration waits for a complete
// (jpeg + depth) input pair, composes it into the final depth photo, and then
// releases all inputs up to the processed timestamp. Returning true keeps the
// thread running; false terminates it.
bool DepthCompositeStream::threadLoop() {
    int64_t currentTs = INT64_MAX;
    bool newInputAvailable = false;

    {
        Mutex::Autolock l(mMutex);

        if (mErrorState) {
            // In case we landed in error state, return any pending buffers and
            // halt all further processing.
            compilePendingInputLocked();
            releaseInputFramesLocked(currentTs);
            return false;
        }

        while (!newInputAvailable) {
            compilePendingInputLocked();
            newInputAvailable = getNextReadyInputLocked(&currentTs);
            if (!newInputAvailable) {
                // No complete pair yet; release at most one failed frame (this
                // also triggers the single client error notification).
                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
                if (failingFrameNumber >= 0) {
                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
                    // possible for two internal stream buffers to fail. In such scenario the
                    // composite stream should notify the client about a stream buffer error only
                    // once and this information is kept within 'errorNotified'.
                    // Any present failed input frames will be removed on a subsequent call to
                    // 'releaseInputFramesLocked()'.
                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
                    currentTs = INT64_MAX;
                }

                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
                if (ret == TIMED_OUT) {
                    // No input arrived within the wait window; loop again.
                    return true;
                } else if (ret != OK) {
                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
                            strerror(-ret), ret);
                    return false;
                }
            }
        }
    }

    // Compose without holding 'mMutex'; re-acquire it afterwards to update
    // the frame state and release the consumed inputs.
    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
    Mutex::Autolock l(mMutex);
    if (res != OK) {
        ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
                currentTs, strerror(-res), res);
        mPendingInputFrames[currentTs].error = true;
    }

    releaseInputFramesLocked(currentTs);

    return true;
}
492
isDepthCompositeStream(const sp<Surface> & surface)493 bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
494 ANativeWindow *anw = surface.get();
495 status_t err;
496 int format;
497 if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
498 String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
499 err);
500 ALOGE("%s: %s", __FUNCTION__, msg.string());
501 return false;
502 }
503
504 int dataspace;
505 if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
506 String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
507 err);
508 ALOGE("%s: %s", __FUNCTION__, msg.string());
509 return false;
510 }
511
512 if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
513 return true;
514 }
515
516 return false;
517 }
518
// Returns whether 'value' is present in 'containerSet'. The set is now taken
// by const reference; the original passed it by value, copying the whole
// container (hash table + nodes) on every call for no reason.
static bool setContains(const std::unordered_set<int32_t>& containerSet, int32_t value) {
    return containerSet.find(value) != containerSet.end();
}
522
checkAndGetMatchingDepthSize(size_t width,size_t height,const std::vector<std::tuple<size_t,size_t>> & depthSizes,const std::vector<std::tuple<size_t,size_t>> & depthSizesMaximumResolution,const std::unordered_set<int32_t> & sensorPixelModesUsed,size_t * depthWidth,size_t * depthHeight)523 status_t DepthCompositeStream::checkAndGetMatchingDepthSize(size_t width, size_t height,
524 const std::vector<std::tuple<size_t, size_t>> &depthSizes,
525 const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
526 const std::unordered_set<int32_t> &sensorPixelModesUsed,
527 size_t *depthWidth, size_t *depthHeight) {
528 if (depthWidth == nullptr || depthHeight == nullptr) {
529 return BAD_VALUE;
530 }
531 size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
532 bool hasDefaultSensorPixelMode =
533 setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
534
535 bool hasMaximumResolutionSensorPixelMode =
536 setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);
537
538 if (!hasDefaultSensorPixelMode && !hasMaximumResolutionSensorPixelMode) {
539 ALOGE("%s: sensor pixel modes don't contain either maximum resolution or default modes",
540 __FUNCTION__);
541 return BAD_VALUE;
542 }
543
544 if (hasDefaultSensorPixelMode) {
545 auto ret = getMatchingDepthSize(width, height, depthSizes, &chosenDepthWidth,
546 &chosenDepthHeight);
547 if (ret != OK) {
548 ALOGE("%s: No matching depth stream size found", __FUNCTION__);
549 return ret;
550 }
551 }
552
553 if (hasMaximumResolutionSensorPixelMode) {
554 size_t depthWidth = 0, depthHeight = 0;
555 auto ret = getMatchingDepthSize(width, height,
556 depthSizesMaximumResolution, &depthWidth, &depthHeight);
557 if (ret != OK) {
558 ALOGE("%s: No matching max resolution depth stream size found", __FUNCTION__);
559 return ret;
560 }
561 // Both matching depth sizes should be the same.
562 if (chosenDepthWidth != 0 && chosenDepthWidth != depthWidth &&
563 chosenDepthHeight != depthHeight) {
564 ALOGE("%s: Maximum resolution sensor pixel mode and default sensor pixel mode don't"
565 " have matching depth sizes", __FUNCTION__);
566 return BAD_VALUE;
567 }
568 if (chosenDepthWidth == 0) {
569 chosenDepthWidth = depthWidth;
570 chosenDepthHeight = depthHeight;
571 }
572 }
573 *depthWidth = chosenDepthWidth;
574 *depthHeight = chosenDepthHeight;
575 return OK;
576 }
577
578
// Creates the two internal streams (blob/jpeg and depth map) that feed the
// composite output. 'consumers[0]' is the client-facing surface that receives
// the final depth photos. On success '*id' and '*surfaceIds' describe the
// internal blob stream.
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        std::vector<int> *surfaceIds,
        int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    // Pick a depth map size that matches the requested blob dimensions for the
    // sensor pixel modes in use.
    size_t depthWidth, depthHeight;
    auto ret =
            checkAndGetMatchingDepthSize(width, height, mSupportedDepthSizes,
                    mSupportedDepthSizesMaximumResolution, sensorPixelModesUsed, &depthWidth,
                    &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    // Internal blob (jpeg) stream, consumed on the CPU by this class.
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, sensorPixelModesUsed, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    // Internal depth map stream, also consumed on the CPU.
    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
            &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    // Listen for buffers and results on both internal streams.
    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    // Remember the client-requested blob dimensions; they are embedded into
    // the depth photo during processing.
    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}
656
// Prepares the client-facing output surface (connect, blob format, buffer
// count) and starts the processing thread. A second call while the thread is
// already running is a no-op.
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    // The composite output is emitted as a blob (jpeg) buffer.
    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
                    &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    // Size the output queue to cover both the internal producer and the
    // client consumer demand.
    if ((res = native_window_set_buffer_count(
                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    run("DepthCompositeStreamProc");

    return NO_ERROR;
}
709
deleteInternalStreams()710 status_t DepthCompositeStream::deleteInternalStreams() {
711 // The 'CameraDeviceClient' parent will delete the blob stream
712 requestExit();
713
714 auto ret = join();
715 if (ret != OK) {
716 ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
717 strerror(-ret), ret);
718 }
719
720 if (mDepthStreamId >= 0) {
721 // Camera devices may not be valid after switching to offline mode.
722 // In this case, all offline streams including internal composite streams
723 // are managed and released by the offline session.
724 sp<CameraDeviceBase> device = mDevice.promote();
725 if (device.get() != nullptr) {
726 ret = device->deleteStream(mDepthStreamId);
727 }
728
729 mDepthStreamId = -1;
730 }
731
732 if (mOutputSurface != nullptr) {
733 mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
734 mOutputSurface.clear();
735 }
736
737 return ret;
738 }
739
onFrameAvailable(const BufferItem & item)740 void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
741 if (item.mDataSpace == kJpegDataSpace) {
742 ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
743 __func__, ns2ms(item.mTimestamp));
744
745 Mutex::Autolock l(mMutex);
746 if (!mErrorState) {
747 mInputJpegBuffers.push_back(item.mTimestamp);
748 mInputReadyCondition.signal();
749 }
750 } else if (item.mDataSpace == kDepthMapDataSpace) {
751 ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
752 ns2ms(item.mTimestamp));
753
754 Mutex::Autolock l(mMutex);
755 if (!mErrorState) {
756 mInputDepthBuffers.push_back(item.mTimestamp);
757 mInputReadyCondition.signal();
758 }
759 } else {
760 ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
761 }
762 }
763
insertGbp(SurfaceMap * outSurfaceMap,Vector<int32_t> * outputStreamIds,int32_t * currentStreamId)764 status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
765 Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
766 if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
767 outputStreamIds->push_back(mDepthStreamId);
768 }
769 (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
770
771 if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
772 outputStreamIds->push_back(mBlobStreamId);
773 }
774 (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
775
776 if (currentStreamId != nullptr) {
777 *currentStreamId = mBlobStreamId;
778 }
779
780 return NO_ERROR;
781 }
782
insertCompositeStreamIds(std::vector<int32_t> * compositeStreamIds)783 status_t DepthCompositeStream::insertCompositeStreamIds(
784 std::vector<int32_t>* compositeStreamIds /*out*/) {
785 if (compositeStreamIds == nullptr) {
786 return BAD_VALUE;
787 }
788
789 compositeStreamIds->push_back(mDepthStreamId);
790 compositeStreamIds->push_back(mBlobStreamId);
791
792 return OK;
793 }
794
// A failed capture result is not fatal for the composite output; only the
// cached result for that frame is dropped.
void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
    // Processing can continue even in case of result errors.
    // At the moment depth composite stream processing relies mainly on static camera
    // characteristics data. The actual result data can be used for the jpeg quality but
    // in case it is absent we can default to maximum.
    eraseResult(resultExtras.frameNumber);
}
802
onStreamBufferError(const CaptureResultExtras & resultExtras)803 bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
804 bool ret = false;
805 // Buffer errors concerning internal composite streams should not be directly visible to
806 // camera clients. They must only receive a single buffer error with the public composite
807 // stream id.
808 if ((resultExtras.errorStreamId == mDepthStreamId) ||
809 (resultExtras.errorStreamId == mBlobStreamId)) {
810 flagAnErrorFrameNumber(resultExtras.frameNumber);
811 ret = true;
812 }
813
814 return ret;
815 }
816
getMatchingDepthSize(size_t width,size_t height,const std::vector<std::tuple<size_t,size_t>> & supporedDepthSizes,size_t * depthWidth,size_t * depthHeight)817 status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
818 const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
819 size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
820 if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
821 return BAD_VALUE;
822 }
823
824 float arTol = CameraProviderManager::kDepthARTolerance;
825 *depthWidth = *depthHeight = 0;
826
827 float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
828 for (const auto& it : supporedDepthSizes) {
829 auto currentWidth = std::get<0>(it);
830 auto currentHeight = std::get<1>(it);
831 if ((currentWidth == width) && (currentHeight == height)) {
832 *depthWidth = width;
833 *depthHeight = height;
834 break;
835 } else {
836 float currentRatio = static_cast<float> (currentWidth) /
837 static_cast<float> (currentHeight);
838 auto currentSize = currentWidth * currentHeight;
839 auto oldSize = (*depthWidth) * (*depthHeight);
840 if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
841 *depthWidth = currentWidth;
842 *depthHeight = currentHeight;
843 }
844 }
845 }
846
847 return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
848 }
849
getSupportedDepthSizes(const CameraMetadata & ch,bool maxResolution,std::vector<std::tuple<size_t,size_t>> * depthSizes)850 void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
851 std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
852 if (depthSizes == nullptr) {
853 return;
854 }
855
856 auto entry = ch.find(
857 camera3::SessionConfigurationUtils::getAppropriateModeTag(
858 ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution));
859 if (entry.count > 0) {
860 // Depth stream dimensions have four int32_t components
861 // (pixelformat, width, height, type)
862 size_t entryCount = entry.count / 4;
863 depthSizes->reserve(entryCount);
864 for (size_t i = 0; i < entry.count; i += 4) {
865 if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
866 (entry.data.i32[i+3] ==
867 ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
868 depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
869 entry.data.i32[i+2]));
870 }
871 }
872 }
873 }
874
// Given the client-requested blob stream info, produces the stream infos of
// the two internal streams (jpeg/blob first, depth map second) a depth
// composite stream would create for it. Fails when the device has no depth
// configurations or no depth size matches the requested dimensions.
status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
        const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
    if (compositeOutput == nullptr) {
        return BAD_VALUE;
    }

    std::vector<std::tuple<size_t, size_t>> depthSizes;
    std::vector<std::tuple<size_t, size_t>> depthSizesMaximumResolution;
    getSupportedDepthSizes(ch, /*maxResolution*/false, &depthSizes);
    if (depthSizes.empty()) {
        ALOGE("%s: No depth stream configurations present", __FUNCTION__);
        return BAD_VALUE;
    }

    // Ultra-high-resolution sensors additionally need the max resolution
    // depth configurations for size matching.
    if (SessionConfigurationUtils::isUltraHighResolutionSensor(ch)) {
        getSupportedDepthSizes(ch, /*maxResolution*/true, &depthSizesMaximumResolution);
        if (depthSizesMaximumResolution.empty()) {
            ALOGE("%s: No depth stream configurations for maximum resolution present",
                    __FUNCTION__);
            return BAD_VALUE;
        }
    }

    size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
    auto ret = checkAndGetMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes,
            depthSizesMaximumResolution, streamInfo.sensorPixelModesUsed, &chosenDepthWidth,
            &chosenDepthHeight);

    if (ret != OK) {
        ALOGE("%s: Couldn't get matching depth sizes", __FUNCTION__);
        return ret;
    }

    // Start from two copies of the client stream info and specialize each.
    compositeOutput->clear();
    compositeOutput->insert(compositeOutput->end(), 2, streamInfo);

    // Sensor pixel modes should stay the same here. They're already overridden.
    // Jpeg/Blob stream info
    (*compositeOutput)[0].dataSpace = kJpegDataSpace;
    (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    // Depth stream info
    (*compositeOutput)[1].width = chosenDepthWidth;
    (*compositeOutput)[1].height = chosenDepthHeight;
    (*compositeOutput)[1].format = kDepthMapPixelFormat;
    (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
    (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    return NO_ERROR;
}
925
926 }; // namespace camera3
927 }; // namespace android
928