/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "buffer_queue.h"
#include <algorithm>
#include <chrono>
#include <fstream>
#include <linux/dma-buf.h>
#include <sstream>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <cinttypes>
#include <unistd.h>
#include <parameters.h>

#include "acquire_fence_manager.h"
#include "buffer_utils.h"
#include "buffer_log.h"
#include "hebc_white_list.h"
#include "hitrace_meter.h"
#include "metadata_helper.h"
#include "sandbox_utils.h"
#include "surface_buffer_impl.h"
#include "sync_fence.h"
#include "sync_fence_tracker.h"
#include "surface_utils.h"
#include "surface_trace.h"
#include "v2_0/buffer_handle_meta_key_type.h"

namespace OHOS {
namespace {
constexpr int32_t FORCE_GLOBAL_ALPHA_MIN = -1;
constexpr int32_t FORCE_GLOBAL_ALPHA_MAX = 255;
constexpr uint32_t UNIQUE_ID_OFFSET = 32;
constexpr uint32_t BUFFER_MEMSIZE_RATE = 1024;
constexpr uint32_t BUFFER_MEMSIZE_FORMAT = 2;
constexpr uint32_t MAXIMUM_LENGTH_OF_APP_FRAMEWORK = 64;
constexpr uint32_t INVALID_SEQUENCE = 0xFFFFFFFF;
constexpr uint32_t ONE_SECOND_TIMESTAMP = 1e9;
constexpr const char* BUFFER_SUPPORT_FASTCOMPOSE = "SupportFastCompose";
}

static const std::map<BufferState, std::string> BufferStateStrs = {
    {BUFFER_STATE_RELEASED, "0 <released>"},
    {BUFFER_STATE_REQUESTED, "1 <requested>"},
    {BUFFER_STATE_FLUSHED, "2 <flushed>"},
    {BUFFER_STATE_ACQUIRED, "3 <acquired>"},
    {BUFFER_STATE_ATTACHED, "4 <attached>"},
};

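// Builds a process-wide unique queue id: the real pid in the high 32 bits
// and a monotonically increasing counter in the low 32 bits.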
static uint64_t GetUniqueIdImpl()
{
    static std::atomic<uint32_t> counter { 0 };
    static uint64_t id = static_cast<uint64_t>(GetRealPid()) << UNIQUE_ID_OFFSET;
    return id | counter.fetch_add(1, std::memory_order_relaxed);
}

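// Returns true when the current process is the render service, by comparing
// /proc/self/cmdline against the render_service binary path.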
static bool IsLocalRender()
{
    std::ifstream procfile("/proc/self/cmdline");
    if (!procfile.is_open()) {
        BLOGE("Error opening procfile!");
        return false;
    }
    std::string processName;
    std::getline(procfile, processName);
    procfile.close();
    std::string target = "/system/bin/render_service";
    bool result = processName.substr(0, target.size()) == target;
    return result;
}

BufferQueue::BufferQueue(const std::string &name)
    : name_(name), uniqueId_(GetUniqueIdImpl()), isLocalRender_(IsLocalRender())
{
    BLOGD("BufferQueue ctor, uniqueId: %{public}" PRIu64 ".", uniqueId_);
    acquireLastFlushedBufSequence_ = INVALID_SEQUENCE;

    if (isLocalRender_) {
        if (!HebcWhiteList::GetInstance().Init()) {
            BLOGW("HebcWhiteList init failed");
        }
    }
}

BufferQueue::~BufferQueue()
{
    BLOGD("~BufferQueue dtor, uniqueId: %{public}" PRIu64 ".", uniqueId_);
    for (auto &[id, _] : bufferQueueCache_) {
        OnBufferDeleteForRS(id);
    }
}

uint32_t BufferQueue::GetUsedSize()
{
    return static_cast<uint32_t>(bufferQueueCache_.size());
}

GSError BufferQueue::GetProducerInitInfo(ProducerInitInfo &info)
{
    static uint64_t producerId = 1; // producerId starts from 1; 0 is reserved for the consumer
    std::lock_guard<std::mutex> lockGuard(mutex_);
    info.name = name_;
    info.width = defaultWidth_;
    info.height = defaultHeight_;
    info.uniqueId = uniqueId_;
    info.isInHebcList = HebcWhiteList::GetInstance().Check(info.appName);
    info.bufferName = bufferName_;
    info.producerId = producerId++;
    info.transformHint = transformHint_;
    return GSERROR_OK;
}

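// Picks a reusable buffer from the free list, preferring one whose config matches the request.
// The buffer pinned by AcquireLastFlushedBuffer is skipped. Returns GSERROR_NO_BUFFER when the
// free list is empty, when the queue still has room to allocate a new buffer, or when only the
// pinned buffer is free; otherwise the front free buffer is reused with the requested color
// gamut and transform.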
GSError BufferQueue::PopFromFreeListLocked(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config)
{
    for (auto it = freeList_.begin(); it != freeList_.end(); it++) {
        auto mapIter = bufferQueueCache_.find(*it);
        if (mapIter != bufferQueueCache_.end() && mapIter->second.config == config) {
            if (mapIter->first == acquireLastFlushedBufSequence_) {
                continue;
            }
            buffer = mapIter->second.buffer;
            freeList_.erase(it);
            return GSERROR_OK;
        }
    }

    if (freeList_.empty() || GetUsedSize() < bufferQueueSize_ - detachReserveSlotNum_ ||
        (freeList_.size() == 1 && freeList_.front() == acquireLastFlushedBufSequence_)) {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }

    if (freeList_.front() == acquireLastFlushedBufSequence_) {
        freeList_.pop_front();
        freeList_.push_back(acquireLastFlushedBufSequence_);
    }

    buffer = bufferQueueCache_[freeList_.front()].buffer;
    buffer->SetSurfaceBufferColorGamut(config.colorGamut);
    buffer->SetSurfaceBufferTransform(config.transform);
    freeList_.pop_front();
    return GSERROR_OK;
}

GSError BufferQueue::PopFromDirtyListLocked(sptr<SurfaceBuffer> &buffer)
{
    if (!dirtyList_.empty()) {
        buffer = bufferQueueCache_[dirtyList_.front()].buffer;
        dirtyList_.pop_front();
        return GSERROR_OK;
    } else {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }
}

GSError BufferQueue::CheckRequestConfig(const BufferRequestConfig &config)
{
    if (config.colorGamut <= GraphicColorGamut::GRAPHIC_COLOR_GAMUT_INVALID ||
        config.colorGamut > GraphicColorGamut::GRAPHIC_COLOR_GAMUT_DISPLAY_BT2020 + 1) {
        BLOGW("colorGamut is %{public}d, uniqueId: %{public}" PRIu64 ".",
            static_cast<uint32_t>(config.colorGamut), uniqueId_);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.transform < GraphicTransformType::GRAPHIC_ROTATE_NONE ||
        config.transform >= GraphicTransformType::GRAPHIC_ROTATE_BUTT) {
        BLOGW("transform is %{public}d, uniqueId: %{public}" PRIu64 ".", config.transform, uniqueId_);
        return GSERROR_INVALID_ARGUMENTS;
    }
    return GSERROR_OK;
}

GSError BufferQueue::CheckFlushConfig(const BufferFlushConfigWithDamages &config)
{
    for (decltype(config.damages.size()) i = 0; i < config.damages.size(); i++) {
        if (config.damages[i].w < 0 || config.damages[i].h < 0) {
            BLOGW("damages[%{public}zu].w is %{public}d, .h is %{public}d, uniqueId: %{public}" PRIu64 ".",
                i, config.damages[i].w, config.damages[i].h, uniqueId_);
            return GSERROR_INVALID_ARGUMENTS;
        }
    }
    return GSERROR_OK;
}

bool BufferQueue::QueryIfBufferAvailable()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    bool ret = !freeList_.empty() || (GetUsedSize() < bufferQueueSize_);
    return ret;
}

static GSError DelegatorDequeueBuffer(wptr<ConsumerSurfaceDelegator>& delegator,
    const BufferRequestConfig& config,
    sptr<BufferExtraData>& bedata,
    struct IBufferProducer::RequestBufferReturnValue& retval)
{
    auto consumerDelegator = delegator.promote();
    if (consumerDelegator == nullptr) {
        BLOGE("Consumer surface delegator has expired");
        return GSERROR_INVALID_ARGUMENTS;
    }
    auto ret = consumerDelegator->DequeueBuffer(config, bedata, retval);
    if (ret != GSERROR_OK) {
        BLOGE("Consumer surface delegator failed to dequeue buffer, err: %{public}d", ret);
        return ret;
    }

    ret = retval.buffer->Map();
    if (ret != GSERROR_OK) {
        BLOGE("Buffer map failed, err: %{public}d", ret);
        return ret;
    }
    retval.buffer->SetSurfaceBufferWidth(retval.buffer->GetWidth());
    retval.buffer->SetSurfaceBufferHeight(retval.buffer->GetHeight());

    return GSERROR_OK;
}

static void SetReturnValue(sptr<SurfaceBuffer>& buffer, sptr<BufferExtraData>& bedata,
    struct IBufferProducer::RequestBufferReturnValue& retval)
{
    retval.sequence = buffer->GetSeqNum();
    bedata = buffer->GetExtraData();
    retval.fence = SyncFence::InvalidFence();
}

void BufferQueue::SetSurfaceBufferHebcMetaLocked(sptr<SurfaceBuffer> buffer)
{
    using namespace HDI::Display::Graphic::Common;
    // if the usage does not contain BUFFER_USAGE_CPU_HW_BOTH, skip setting HEBC metadata
    if (!(buffer->GetUsage() & BUFFER_USAGE_CPU_HW_BOTH)) {
        return;
    }

    V2_0::BufferHandleAttrKey key = V2_0::BufferHandleAttrKey::ATTRKEY_REQUEST_ACCESS_TYPE;
    std::vector<uint8_t> values;
    if (isCpuAccessable_) { // hebc is off
        values.emplace_back(static_cast<uint8_t>(V2_0::HebcAccessType::HEBC_ACCESS_CPU_ACCESS));
    } else { // hebc is on
        values.emplace_back(static_cast<uint8_t>(V2_0::HebcAccessType::HEBC_ACCESS_HW_ONLY));
    }

    buffer->SetMetadata(key, values);
}

void BufferQueue::SetBatchHandle(bool batch)
{
    std::unique_lock<std::mutex> lock(mutex_);
    isBatch_ = batch;
}

GSError BufferQueue::RequestBufferCheckStatus()
{
    if (isBatch_) {
        return GSERROR_OK;
    }
    if (!GetStatusLocked()) {
        SURFACE_TRACE_NAME_FMT("RequestBufferCheckStatus status wrong,"
            "surface name: %s queueId: %" PRIu64 " status: %u", name_.c_str(), uniqueId_, GetStatusLocked());
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    if (listener_ == nullptr && listenerClazz_ == nullptr) {
        SURFACE_TRACE_NAME_FMT("RequestBufferCheckStatus no listener, surface name: %s queueId: %" PRIu64,
            name_.c_str(), uniqueId_);
        BLOGN_FAILURE_RET(SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER);
    }

    return GSERROR_OK;
}

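// Predicate used while waiting for a free slot in RequestBufferLocked: wakes up when a
// reusable free buffer exists (other than the one pinned by AcquireLastFlushedBuffer),
// when there is room to allocate a new buffer, or when the queue is no longer connected.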
bool BufferQueue::WaitForCondition()
{
    return (!freeList_.empty() && !(freeList_.size() == 1 && freeList_.front() == acquireLastFlushedBufSequence_)) ||
        (GetUsedSize() < bufferQueueSize_ - detachReserveSlotNum_) || !GetStatusLocked();
}

void BufferQueue::RequestBufferDebugInfoLocked()
{
    SURFACE_TRACE_NAME_FMT("lockLastFlushedBuffer seq: %u, reserveSlotNum: %u",
        acquireLastFlushedBufSequence_, detachReserveSlotNum_);
    std::map<BufferState, int32_t> bufferState;
    for (auto &[id, ele] : bufferQueueCache_) {
        SURFACE_TRACE_NAME_FMT("request buffer id: %u state: %u", id, ele.state);
        bufferState[ele.state] += 1;
    }
    std::string str = std::to_string(uniqueId_) +
        ", Released: " + std::to_string(bufferState[BUFFER_STATE_RELEASED]) +
        " Requested: " + std::to_string(bufferState[BUFFER_STATE_REQUESTED]) +
        " Flushed: " + std::to_string(bufferState[BUFFER_STATE_FLUSHED]) +
        " Acquired: " + std::to_string(bufferState[BUFFER_STATE_ACQUIRED]);
    if (str.compare(requestBufferStateStr_) != 0) {
        requestBufferStateStr_ = str;
        BLOGE("all buffers are in use, uniqueId: %{public}s", str.c_str());
    }
}

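// Core of RequestBuffer: validates the config, tries to reuse a free buffer, waits up to
// config.timeout for a slot when the queue is full, and finally allocates a new buffer.
// Deleted sequence numbers are reported back to the producer through retval.deletingBuffers.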
GSError BufferQueue::RequestBufferLocked(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    GSError ret = RequestBufferCheckStatus();
    if (ret != GSERROR_OK) {
        return ret;
    }

    // check param
    BufferRequestConfig updateConfig = config;
    updateConfig.usage |= defaultUsage_;
    ret = CheckRequestConfig(updateConfig);
    if (ret != GSERROR_OK) {
        BLOGE("CheckRequestConfig ret: %{public}d, uniqueId: %{public}" PRIu64 ".", ret, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
    SURFACE_TRACE_NAME_FMT("RequestBuffer name: %s queueId: %" PRIu64 " queueSize: %u reserveSlotNum: %u",
        name_.c_str(), uniqueId_, bufferQueueSize_, detachReserveSlotNum_);
    // dequeue from free list
    sptr<SurfaceBuffer>& buffer = retval.buffer;
    ret = PopFromFreeListLocked(buffer, updateConfig);
    if (ret == GSERROR_OK) {
        return ReuseBuffer(updateConfig, bedata, retval, lock);
    }

    // check queue size
    if (GetUsedSize() >= bufferQueueSize_ - detachReserveSlotNum_) {
        waitReqCon_.wait_for(lock, std::chrono::milliseconds(config.timeout),
            [this]() { return WaitForCondition(); });
        if (!GetStatusLocked() && !isBatch_) {
            SURFACE_TRACE_NAME_FMT("Status wrong, status: %d", GetStatusLocked());
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
        // try dequeue from free list again
        ret = PopFromFreeListLocked(buffer, updateConfig);
        if (ret == GSERROR_OK) {
            return ReuseBuffer(updateConfig, bedata, retval, lock);
        } else if (GetUsedSize() >= bufferQueueSize_ - detachReserveSlotNum_) {
            RequestBufferDebugInfoLocked();
            return GSERROR_NO_BUFFER;
        }
    }

    ret = AllocBuffer(buffer, updateConfig, lock);
    if (ret == GSERROR_OK) {
        AddDeletingBuffersLocked(retval.deletingBuffers);
        SetSurfaceBufferHebcMetaLocked(buffer);
        SetSurfaceBufferGlobalAlphaUnlocked(buffer);
        SetReturnValue(buffer, bedata, retval);
    } else {
        BLOGE("Fail to alloc or map Buffer[%{public}d %{public}d] ret: %{public}d, uniqueId: %{public}" PRIu64,
            config.width, config.height, ret, uniqueId_);
    }
    return ret;
}

GSError BufferQueue::RequestBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    if (wpCSurfaceDelegator_ != nullptr) {
        return DelegatorDequeueBuffer(wpCSurfaceDelegator_, config, bedata, retval);
    }
    std::unique_lock<std::mutex> lock(mutex_);
    return RequestBufferLocked(config, bedata, retval, lock);
}

GSError BufferQueue::SetProducerCacheCleanFlag(bool flag)
{
    std::unique_lock<std::mutex> lock(mutex_);
    return SetProducerCacheCleanFlagLocked(flag, lock);
}

GSError BufferQueue::SetProducerCacheCleanFlagLocked(bool flag, std::unique_lock<std::mutex> &lock)
{
    isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
    producerCacheClean_ = flag;
    producerCacheList_.clear();
    return GSERROR_OK;
}

bool BufferQueue::CheckProducerCacheListLocked()
{
    for (auto &[id, _] : bufferQueueCache_) {
        if (std::find(producerCacheList_.begin(), producerCacheList_.end(), id) == producerCacheList_.end()) {
            return false;
        }
    }
    return true;
}

GSError BufferQueue::ReallocBufferLocked(const BufferRequestConfig &config,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    DeleteBufferInCacheNoWaitForAllocatingState(retval.sequence);

    sptr<SurfaceBuffer> buffer = nullptr;
    auto sret = AllocBuffer(buffer, config, lock);
    if (sret != GSERROR_OK) {
        BLOGE("AllocBuffer failed: %{public}d, uniqueId: %{public}" PRIu64 ".", sret, uniqueId_);
        return sret;
    }

    retval.buffer = buffer;
    retval.sequence = buffer->GetSeqNum();
    bufferQueueCache_[retval.sequence].config = config;
    return GSERROR_OK;
}

void BufferQueue::AddDeletingBuffersLocked(std::vector<uint32_t> &deletingBuffers)
{
    deletingBuffers.reserve(deletingBuffers.size() + deletingList_.size());
    deletingBuffers.insert(deletingBuffers.end(), deletingList_.begin(), deletingList_.end());
    deletingList_.clear();
}

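// Prepares a buffer taken from the free list for the producer. If the requested config differs
// from the cached one, the buffer is reallocated. retval.buffer is kept only when the producer
// side needs the new handle (realloc, producer cache clean, or a consumer re-attach); otherwise
// it is cleared so only the sequence number travels back.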
GSError BufferQueue::ReuseBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    if (retval.buffer == nullptr) {
        BLOGE("input buffer is null, uniqueId: %{public}" PRIu64 ".", uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    retval.sequence = retval.buffer->GetSeqNum();
    if (bufferQueueCache_.find(retval.sequence) == bufferQueueCache_.end()) {
        BLOGE("cache not find the buffer(%{public}u), uniqueId: %{public}" PRIu64 ".", retval.sequence, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    auto &cacheConfig = bufferQueueCache_[retval.sequence].config;
    SURFACE_TRACE_NAME_FMT("ReuseBuffer config width: %d height: %d usage: %llu format: %d id: %u",
        cacheConfig.width, cacheConfig.height, cacheConfig.usage, cacheConfig.format, retval.sequence);

    bool needRealloc = (config != bufferQueueCache_[retval.sequence].config);
    // config, realloc
    if (needRealloc) {
        auto sret = ReallocBufferLocked(config, retval, lock);
        if (sret != GSERROR_OK) {
            return sret;
        }
    }

    bufferQueueCache_[retval.sequence].state = BUFFER_STATE_REQUESTED;
    retval.fence = bufferQueueCache_[retval.sequence].fence;
    bedata = retval.buffer->GetExtraData();
    SetSurfaceBufferHebcMetaLocked(retval.buffer);
    SetSurfaceBufferGlobalAlphaUnlocked(retval.buffer);

    auto &dbs = retval.deletingBuffers;
    AddDeletingBuffersLocked(dbs);

    if (needRealloc || producerCacheClean_ || retval.buffer->GetConsumerAttachBufferFlag()) {
        if (producerCacheClean_) {
            producerCacheList_.push_back(retval.sequence);
            if (CheckProducerCacheListLocked()) {
                SetProducerCacheCleanFlagLocked(false, lock);
            }
        }
        retval.buffer->SetConsumerAttachBufferFlag(false);
    } else {
        retval.buffer = nullptr;
    }

    SURFACE_TRACE_NAME_FMT("%s:%u", name_.c_str(), retval.sequence);
    if (IsTagEnabled(HITRACE_TAG_GRAPHIC_AGP) && isLocalRender_) {
        static SyncFenceTracker releaseFenceThread("Release Fence");
        releaseFenceThread.TrackFence(retval.fence);
    }
    return GSERROR_OK;
}

GSError BufferQueue::CancelBufferLocked(uint32_t sequence, sptr<BufferExtraData> bedata)
{
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }

    if (bufferQueueCache_[sequence].state != BUFFER_STATE_REQUESTED &&
        bufferQueueCache_[sequence].state != BUFFER_STATE_ATTACHED) {
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
    freeList_.push_back(sequence);
    if (bufferQueueCache_[sequence].buffer == nullptr) {
        BLOGE("cache buffer is nullptr, sequence:%{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);

    waitReqCon_.notify_all();
    waitAttachCon_.notify_all();

    return GSERROR_OK;
}

GSError BufferQueue::CancelBuffer(uint32_t sequence, sptr<BufferExtraData> bedata)
{
    SURFACE_TRACE_NAME_FMT("CancelBuffer name: %s queueId: %" PRIu64 " sequence: %u",
        name_.c_str(), uniqueId_, sequence);
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return CancelBufferLocked(sequence, bedata);
}

GSError BufferQueue::CheckBufferQueueCacheLocked(uint32_t sequence)
{
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGE("no find seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }

    auto &state = bufferQueueCache_[sequence].state;
    if (state != BUFFER_STATE_REQUESTED && state != BUFFER_STATE_ATTACHED) {
        BLOGE("seq: %{public}u, invalid state %{public}d, uniqueId: %{public}" PRIu64 ".",
            sequence, state, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    return GSERROR_OK;
}

GSError BufferQueue::CheckBufferQueueCache(uint32_t sequence)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return CheckBufferQueueCacheLocked(sequence);
}

GSError BufferQueue::DelegatorQueueBuffer(uint32_t sequence, sptr<SyncFence> fence)
{
    auto consumerDelegator = wpCSurfaceDelegator_.promote();
    if (consumerDelegator == nullptr) {
        BLOGE("Consumer surface delegator has expired");
        return GSERROR_INVALID_ARGUMENTS;
    }
    sptr<SurfaceBuffer> buffer = nullptr;
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            return GSERROR_NO_ENTRY;
        }
        bufferQueueCache_[sequence].state = BUFFER_STATE_ACQUIRED;
        buffer = bufferQueueCache_[sequence].buffer;
    }
    GSError ret = consumerDelegator->QueueBuffer(buffer, fence->Get());
    if (ret != GSERROR_OK) {
        BLOGE("Consumer surface delegator failed to queue buffer");
    }
    ret = ReleaseBuffer(buffer, SyncFence::InvalidFence());
    if (ret != GSERROR_OK) {
        BLOGE("Consumer surface delegator failed to release buffer");
    }
    return ret;
}

void BufferQueue::CallConsumerListener()
{
    SURFACE_TRACE_NAME_FMT("CallConsumerListener");
    sptr<IBufferConsumerListener> listener;
    IBufferConsumerListenerClazz *listenerClazz;
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        listener = listener_;
        listenerClazz = listenerClazz_;
    }
    if (listener != nullptr) {
        listener->OnBufferAvailable();
    } else if (listenerClazz != nullptr) {
        listenerClazz->OnBufferAvailable();
    }
}

GSError BufferQueue::FlushBuffer(uint32_t sequence, sptr<BufferExtraData> bedata,
    sptr<SyncFence> fence, const BufferFlushConfigWithDamages &config)
{
    SURFACE_TRACE_NAME_FMT("FlushBuffer name: %s queueId: %" PRIu64 " sequence: %u",
        name_.c_str(), uniqueId_, sequence);
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (!GetStatusLocked()) {
            SURFACE_TRACE_NAME_FMT("status: %d", GetStatusLocked());
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
    }
    // check param
    auto sret = CheckFlushConfig(config);
    if (sret != GSERROR_OK) {
        BLOGE("CheckFlushConfig ret: %{public}d, uniqueId: %{public}" PRIu64 ".", sret, uniqueId_);
        return sret;
    }

    sret = CheckBufferQueueCache(sequence);
    if (sret != GSERROR_OK) {
        return sret;
    }

    bool listenerNullCheck = false;
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ == nullptr && listenerClazz_ == nullptr) {
            listenerNullCheck = true;
        }
    }
    if (listenerNullCheck) {
        SURFACE_TRACE_NAME("listener is nullptr");
        BLOGE("listener is nullptr, uniqueId: %{public}" PRIu64 ".", uniqueId_);
        CancelBuffer(sequence, bedata);
        return SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER;
    }

    sret = DoFlushBuffer(sequence, bedata, fence, config);
    if (sret != GSERROR_OK) {
        return sret;
    }
    CallConsumerListener();

    if (wpCSurfaceDelegator_ != nullptr) {
        sret = DelegatorQueueBuffer(sequence, fence);
    }
    return sret;
}

GSError BufferQueue::GetLastFlushedBuffer(sptr<SurfaceBuffer>& buffer,
    sptr<SyncFence>& fence, float matrix[16], uint32_t matrixSize, bool isUseNewMatrix, bool needRecordSequence)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (needRecordSequence && acquireLastFlushedBufSequence_ != INVALID_SEQUENCE) {
        BLOGE("last flushed buffer(%{public}d) is using, uniqueId: %{public}" PRIu64 ".",
            acquireLastFlushedBufSequence_, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    if (bufferQueueCache_.find(lastFlusedSequence_) == bufferQueueCache_.end()) {
        BLOGE("cache not find the buffer(%{public}u), uniqueId: %{public}" PRIu64 ".", lastFlusedSequence_, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    auto &state = bufferQueueCache_[lastFlusedSequence_].state;
    if (state == BUFFER_STATE_REQUESTED) {
        BLOGE("seq: %{public}u, invalid state %{public}d, uniqueId: %{public}" PRIu64 ".",
            lastFlusedSequence_, state, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    buffer = bufferQueueCache_[lastFlusedSequence_].buffer;
    auto usage = buffer->GetUsage();
    if (usage & BUFFER_USAGE_PROTECTED) {
        BLOGE("lastFlusedSeq: %{public}u, usage: %{public}" PRIu64 ", uniqueId: %{public}" PRIu64 ".",
            lastFlusedSequence_, usage, uniqueId_);
        return SURFACE_ERROR_NOT_SUPPORT;
    }

    fence = lastFlusedFence_;
    Rect damage = {};
    damage.w = buffer->GetWidth();
    damage.h = buffer->GetHeight();

    auto utils = SurfaceUtils::GetInstance();
    if (isUseNewMatrix) {
        utils->ComputeTransformMatrixV2(matrix, matrixSize, buffer, lastFlushedTransform_, damage);
    } else {
        utils->ComputeTransformMatrix(matrix, matrixSize, buffer, lastFlushedTransform_, damage);
    }

    if (needRecordSequence) {
        acquireLastFlushedBufSequence_ = lastFlusedSequence_;
        SURFACE_TRACE_NAME_FMT("GetLastFlushedBuffer(needRecordSequence) name: %s queueId: %" PRIu64 " seq: %u",
            name_.c_str(), uniqueId_, acquireLastFlushedBufSequence_);
    }
    return GSERROR_OK;
}

GSError BufferQueue::AcquireLastFlushedBuffer(sptr<SurfaceBuffer> &buffer, sptr<SyncFence> &fence,
    float matrix[16], uint32_t matrixSize, bool isUseNewMatrix)
{
    return GetLastFlushedBuffer(buffer, fence, matrix, matrixSize, isUseNewMatrix, true);
}

GSError BufferQueue::ReleaseLastFlushedBuffer(uint32_t sequence)
{
    SURFACE_TRACE_NAME_FMT("ReleaseLastFlushedBuffer name: %s queueId: %" PRIu64 " seq: %u",
        name_.c_str(), uniqueId_, sequence);
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (acquireLastFlushedBufSequence_ == INVALID_SEQUENCE || acquireLastFlushedBufSequence_ != sequence) {
        BLOGE("ReleaseLastFlushedBuffer lastFlushBuffer:%{public}d sequence:%{public}d, uniqueId: %{public}" PRIu64,
            acquireLastFlushedBufSequence_, sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    acquireLastFlushedBufSequence_ = INVALID_SEQUENCE;
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

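// Moves a requested/attached buffer into the dirty list. Stores extra data, fence and damages,
// flushes the CPU cache for CPU-writable buffers, records the flush timestamps, and optionally
// dumps the buffer content when persist.dumpbuffer.enabled is set. Buffers marked isDeleting
// are removed from the cache instead of being queued.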
GSError BufferQueue::DoFlushBufferLocked(uint32_t sequence, sptr<BufferExtraData> bedata,
    sptr<SyncFence> fence, const BufferFlushConfigWithDamages &config, std::unique_lock<std::mutex> &lock)
{
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGE("bufferQueueCache not find sequence:%{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }
    if (bufferQueueCache_[sequence].isDeleting) {
        DeleteBufferInCache(sequence, lock);
        BLOGD("DoFlushBuffer delete seq: %{public}d, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
        return GSERROR_OK;
    }

    bufferQueueCache_[sequence].buffer->SetExtraData(bedata);
    int32_t supportFastCompose = 0;
    bufferQueueCache_[sequence].buffer->GetExtraData()->ExtraGet(
        BUFFER_SUPPORT_FASTCOMPOSE, supportFastCompose);
    bufferQueueCache_[sequence].buffer->SetSurfaceBufferTransform(transform_);

    uint64_t usage = static_cast<uint32_t>(bufferQueueCache_[sequence].config.usage);
    if (usage & BUFFER_USAGE_CPU_WRITE) {
        // api flush
        auto sret = bufferQueueCache_[sequence].buffer->FlushCache();
        if (sret != GSERROR_OK) {
            BLOGE("FlushCache ret: %{public}d, seq: %{public}u, uniqueId: %{public}" PRIu64 ".",
                sret, sequence, uniqueId_);
            return sret;
        }
    }
    // update the state only after the possible failure points above, so no state rollback is needed on error
    bufferQueueCache_[sequence].state = BUFFER_STATE_FLUSHED;
    bufferQueueCache_[sequence].fence = fence;
    bufferQueueCache_[sequence].damages = config.damages;
    dirtyList_.push_back(sequence);
    lastFlusedSequence_ = sequence;
    lastFlusedFence_ = fence;
    lastFlushedTransform_ = transform_;
    bufferSupportFastCompose_ = (bool)supportFastCompose;

    SetDesiredPresentTimestampAndUiTimestamp(sequence, config.desiredPresentTimestamp, config.timestamp);
    lastFlushedDesiredPresentTimeStamp_ = bufferQueueCache_[sequence].desiredPresentTimestamp;
    bool traceTag = IsTagEnabled(HITRACE_TAG_GRAPHIC_AGP);
    if (isLocalRender_) {
        AcquireFenceTracker::TrackFence(fence, traceTag);
    }
    // To dump the SurfaceBuffer to a file, run "hdc shell param set persist.dumpbuffer.enabled 1"
    // and reboot the device.
    static bool dumpBufferEnabled = system::GetParameter("persist.dumpbuffer.enabled", "0") != "0";
    if (dumpBufferEnabled) {
        // Wait for the status of the fence to change to SIGNALED.
        fence->Wait(-1);
        DumpToFileAsync(GetRealPid(), name_, bufferQueueCache_[sequence].buffer);
    }

    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    return GSERROR_OK;
}

GSError BufferQueue::DoFlushBuffer(uint32_t sequence, sptr<BufferExtraData> bedata,
    sptr<SyncFence> fence, const BufferFlushConfigWithDamages &config)
{
    SURFACE_TRACE_NAME_FMT("DoFlushBuffer name: %s queueId: %" PRIu64 " seq: %u",
        name_.c_str(), uniqueId_, sequence);
    std::unique_lock<std::mutex> lock(mutex_);
    return DoFlushBufferLocked(sequence, bedata, fence, config, lock);
}

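// Chooses the desired present timestamp for a flushed buffer: a positive value from the producer
// is used as-is; a zero value falls back to the UI timestamp when it fits into int64_t; otherwise
// the current steady-clock time is used and the buffer is marked as auto-timestamped.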
void BufferQueue::SetDesiredPresentTimestampAndUiTimestamp(uint32_t sequence, int64_t desiredPresentTimestamp,
    uint64_t uiTimestamp)
{
    bufferQueueCache_[sequence].isAutoTimestamp = false;
    if (desiredPresentTimestamp <= 0) {
        if (desiredPresentTimestamp == 0 && uiTimestamp != 0
            && uiTimestamp <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
            bufferQueueCache_[sequence].desiredPresentTimestamp = static_cast<int64_t>(uiTimestamp);
        } else {
            bufferQueueCache_[sequence].desiredPresentTimestamp = std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
            bufferQueueCache_[sequence].isAutoTimestamp = true;
        }
    } else {
        bufferQueueCache_[sequence].desiredPresentTimestamp = desiredPresentTimestamp;
    }
    bufferQueueCache_[sequence].timestamp = static_cast<int64_t>(uiTimestamp);
}

void BufferQueue::LogAndTraceAllBufferInBufferQueueCache()
{
    std::map<BufferState, int32_t> bufferState;
    for (auto &[id, ele] : bufferQueueCache_) {
        SURFACE_TRACE_NAME_FMT("acquire buffer id: %d state: %d desiredPresentTimestamp: %" PRId64
            " isAutoTimestamp: %d", id, ele.state, ele.desiredPresentTimestamp, ele.isAutoTimestamp);
        bufferState[ele.state] += 1;
    }
    std::string str = std::to_string(uniqueId_) +
        ", Released: " + std::to_string(bufferState[BUFFER_STATE_RELEASED]) +
        " Requested: " + std::to_string(bufferState[BUFFER_STATE_REQUESTED]) +
        " Flushed: " + std::to_string(bufferState[BUFFER_STATE_FLUSHED]) +
        " Acquired: " + std::to_string(bufferState[BUFFER_STATE_ACQUIRED]);
    if (str.compare(acquireBufferStateStr_) != 0) {
        acquireBufferStateStr_ = str;
        BLOGE("there is no dirty buffer or no dirty buffer ready, uniqueId: %{public}s", str.c_str());
    }
}

GSError BufferQueue::AcquireBuffer(sptr<SurfaceBuffer> &buffer,
    sptr<SyncFence> &fence, int64_t &timestamp, std::vector<Rect> &damages)
{
    SURFACE_TRACE_NAME_FMT("AcquireBuffer name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
    // dequeue from dirty list
    std::lock_guard<std::mutex> lockGuard(mutex_);
    GSError ret = PopFromDirtyListLocked(buffer);
    if (ret == GSERROR_OK) {
        uint32_t sequence = buffer->GetSeqNum();
        bufferQueueCache_[sequence].state = BUFFER_STATE_ACQUIRED;
        bufferQueueCache_[sequence].lastAcquireTime = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();

        fence = bufferQueueCache_[sequence].fence;
        timestamp = bufferQueueCache_[sequence].timestamp;
        damages = bufferQueueCache_[sequence].damages;
        SURFACE_TRACE_NAME_FMT("acquire buffer sequence: %u desiredPresentTimestamp: %" PRId64 " isAutoTimestamp: %d",
            sequence, bufferQueueCache_[sequence].desiredPresentTimestamp,
            bufferQueueCache_[sequence].isAutoTimestamp);
    } else if (ret == GSERROR_NO_BUFFER) {
        LogAndTraceAllBufferInBufferQueueCache();
    }

    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    return ret;
}

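// Acquires the next dirty buffer whose desired present timestamp is due at expectPresentTimestamp.
// Stale buffers (a newer dirty buffer is also due) are dropped and released back to the producer;
// if the front buffer is not due yet, GSERROR_NO_BUFFER_READY is returned so the consumer can
// retry later.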
GSError BufferQueue::AcquireBuffer(IConsumerSurface::AcquireBufferReturnValue &returnValue,
    int64_t expectPresentTimestamp, bool isUsingAutoTimestamp)
{
    SURFACE_TRACE_NAME_FMT("AcquireBuffer with PresentTimestamp name: %s queueId: %" PRIu64 " queueSize: %u"
        "expectPresentTimestamp: %" PRId64, name_.c_str(), uniqueId_, bufferQueueSize_, expectPresentTimestamp);
    if (expectPresentTimestamp <= 0) {
        return AcquireBuffer(returnValue.buffer, returnValue.fence, returnValue.timestamp, returnValue.damages);
    }
    std::vector<BufferAndFence> dropBuffers;
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        std::list<uint32_t>::iterator frontSequence = dirtyList_.begin();
        if (frontSequence == dirtyList_.end()) {
            LogAndTraceAllBufferInBufferQueueCache();
            return GSERROR_NO_BUFFER;
        }
        int64_t frontDesiredPresentTimestamp = bufferQueueCache_[*frontSequence].desiredPresentTimestamp;
        bool frontIsAutoTimestamp = bufferQueueCache_[*frontSequence].isAutoTimestamp;
        if (!frontIsAutoTimestamp && frontDesiredPresentTimestamp > expectPresentTimestamp
            && frontDesiredPresentTimestamp - ONE_SECOND_TIMESTAMP <= expectPresentTimestamp) {
            SURFACE_TRACE_NAME_FMT("Acquire no buffer ready");
            LogAndTraceAllBufferInBufferQueueCache();
            return GSERROR_NO_BUFFER_READY;
        }
        while (!(frontIsAutoTimestamp && !isUsingAutoTimestamp)
            && frontDesiredPresentTimestamp <= expectPresentTimestamp) {
            BufferElement& frontBufferElement = bufferQueueCache_[*frontSequence];
            if (++frontSequence == dirtyList_.end()) {
                BLOGD("Buffer seq(%{public}d) is the last buffer, do acquire.", dirtyList_.front());
                break;
            }
            BufferElement& secondBufferElement = bufferQueueCache_[*frontSequence];
            if ((secondBufferElement.isAutoTimestamp && !isUsingAutoTimestamp)
                || secondBufferElement.desiredPresentTimestamp > expectPresentTimestamp) {
                BLOGD("Next dirty buffer desiredPresentTimestamp: %{public}" PRId64 " not match expectPresentTimestamp"
                    ": %{public}" PRId64 ".", secondBufferElement.desiredPresentTimestamp, expectPresentTimestamp);
                break;
            }
            SURFACE_TRACE_NAME_FMT("DropBuffer name: %s queueId: %" PRIu64 " ,buffer seq: %u , buffer "
                "desiredPresentTimestamp: %" PRId64 " acquire expectPresentTimestamp: %" PRId64, name_.c_str(),
                uniqueId_, frontBufferElement.buffer->GetSeqNum(), frontBufferElement.desiredPresentTimestamp,
                expectPresentTimestamp);
            DropFirstDirtyBuffer(frontBufferElement, secondBufferElement, frontDesiredPresentTimestamp,
                frontIsAutoTimestamp, dropBuffers);
        }
        if (!frontIsAutoTimestamp && !IsPresentTimestampReady(frontDesiredPresentTimestamp, expectPresentTimestamp)) {
            SURFACE_TRACE_NAME_FMT("Acquire no buffer ready");
            LogAndTraceAllBufferInBufferQueueCache();
            return GSERROR_NO_BUFFER_READY;
        }
    }
    ReleaseDropBuffers(dropBuffers);
    return AcquireBuffer(returnValue.buffer, returnValue.fence, returnValue.timestamp, returnValue.damages);
}

void BufferQueue::DropFirstDirtyBuffer(BufferElement &frontBufferElement, BufferElement &secondBufferElement,
    int64_t &frontDesiredPresentTimestamp, bool &frontIsAutoTimestamp,
    std::vector<BufferAndFence> &dropBuffers)
{
    dirtyList_.pop_front();
    frontBufferElement.state = BUFFER_STATE_ACQUIRED;
    dropBuffers.emplace_back(frontBufferElement.buffer, frontBufferElement.fence);
    frontDesiredPresentTimestamp = secondBufferElement.desiredPresentTimestamp;
    frontIsAutoTimestamp = secondBufferElement.isAutoTimestamp;
}

void BufferQueue::ReleaseDropBuffers(std::vector<BufferAndFence> &dropBuffers)
{
    for (auto& dropBuffer : dropBuffers) {
        auto ret = ReleaseBuffer(dropBuffer.first, dropBuffer.second);
        if (ret != GSERROR_OK) {
            BLOGE("DropBuffer failed, ret: %{public}d, sequence: %{public}u, uniqueId: %{public}" PRIu64 ".",
                ret, dropBuffer.first->GetSeqNum(), uniqueId_);
        }
    }
}

bool BufferQueue::IsPresentTimestampReady(int64_t desiredPresentTimestamp, int64_t expectPresentTimestamp)
{
    return isBufferUtilPresentTimestampReady(desiredPresentTimestamp, expectPresentTimestamp);
}

void BufferQueue::ListenerBufferReleasedCb(sptr<SurfaceBuffer> &buffer, const sptr<SyncFence> &fence)
{
    {
        std::lock_guard<std::mutex> lockGuard(onBufferReleaseMutex_);
        if (onBufferRelease_ != nullptr) {
            SURFACE_TRACE_NAME_FMT("OnBufferRelease_ sequence: %u", buffer->GetSeqNum());
            sptr<SurfaceBuffer> buf = buffer;
            (void)onBufferRelease_(buf);
        }
    }

    sptr<IProducerListener> listener;
    sptr<IProducerListener> listenerBackup;
    {
        std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
        listener = producerListener_;
        listenerBackup = producerListenerBackup_;
    }

    if (listener != nullptr) {
        SURFACE_TRACE_NAME_FMT("onBufferReleasedForProducer sequence: %u", buffer->GetSeqNum());
        if (listener->OnBufferReleased() != GSERROR_OK) {
            BLOGE("seq: %{public}u, OnBufferReleased failed, uniqueId: %{public}" PRIu64 ".",
                buffer->GetSeqNum(), uniqueId_);
        }
    }

    if (listenerBackup != nullptr) {
        SURFACE_TRACE_NAME_FMT("onBufferReleasedBackupForProducer sequence: %u", buffer->GetSeqNum());
        if (listenerBackup->OnBufferReleasedWithFence(buffer, fence) != GSERROR_OK) {
            BLOGE("seq: %{public}u, OnBufferReleasedWithFence failed, uniqueId: %{public}" PRIu64 ".",
                buffer->GetSeqNum(), uniqueId_);
        }
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    OnBufferDeleteCbForHardwareThreadLocked(buffer);
}

void BufferQueue::OnBufferDeleteCbForHardwareThreadLocked(const sptr<SurfaceBuffer> &buffer) const
{
    if (onBufferDeleteForRSHardwareThread_ != nullptr) {
        onBufferDeleteForRSHardwareThread_(buffer->GetSeqNum());
    }
}

GSError BufferQueue::ReleaseBuffer(sptr<SurfaceBuffer> &buffer, const sptr<SyncFence>& fence)
{
    if (buffer == nullptr) {
        return GSERROR_INVALID_ARGUMENTS;
    }

    uint32_t sequence = buffer->GetSeqNum();
    SURFACE_TRACE_NAME_FMT("ReleaseBuffer name: %s queueId: %" PRIu64 " seq: %u", name_.c_str(), uniqueId_, sequence);
    {
        std::unique_lock<std::mutex> lock(mutex_);
        if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
            SURFACE_TRACE_NAME_FMT("buffer not found in cache");
            BLOGE("cache not find the buffer(%{public}u), uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
            OnBufferDeleteCbForHardwareThreadLocked(buffer);
            return SURFACE_ERROR_BUFFER_NOT_INCACHE;
        }

        const auto &state = bufferQueueCache_[sequence].state;
        if (state != BUFFER_STATE_ACQUIRED && state != BUFFER_STATE_ATTACHED) {
            SURFACE_TRACE_NAME_FMT("invalid state: %u", state);
            BLOGD("invalid state: %{public}d, uniqueId: %{public}" PRIu64 ".", state, uniqueId_);
            return SURFACE_ERROR_BUFFER_STATE_INVALID;
        }

        bufferQueueCache_[sequence].state = BUFFER_STATE_RELEASED;
        bufferQueueCache_[sequence].fence = fence;
        int64_t now = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
        lastConsumeTime_ = now - bufferQueueCache_[sequence].lastAcquireTime;

        if (bufferQueueCache_[sequence].isDeleting) {
            DeleteBufferInCache(sequence, lock);
        } else {
            freeList_.push_back(sequence);
        }
        waitReqCon_.notify_all();
        waitAttachCon_.notify_all();
    }
    ListenerBufferReleasedCb(buffer, fence);

    return GSERROR_OK;
}

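// Allocates and maps a new SurfaceBuffer for the given config. The queue mutex is released while
// the allocation runs (isAllocatingBuffer_ guards concurrent cache mutation) and re-taken before
// the buffer is inserted into the cache. Protected buffers are not mapped.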
GSError BufferQueue::AllocBuffer(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config, std::unique_lock<std::mutex> &lock)
{
    sptr<SurfaceBuffer> bufferImpl = new SurfaceBufferImpl();
    uint32_t sequence = bufferImpl->GetSeqNum();
    SURFACE_TRACE_NAME_FMT("AllocBuffer name: %s queueId: %" PRIu64 ", config width: %d height: %d usage: %llu format:"
        " %d id: %u", name_.c_str(), uniqueId_, config.width, config.height, config.usage, config.format, sequence);
    ScalingMode scalingMode = scalingMode_;
    int32_t connectedPid = connectedPid_;
    isAllocatingBuffer_ = true;
    lock.unlock();
    GSError ret = bufferImpl->Alloc(config);
    lock.lock();
    isAllocatingBuffer_ = false;
    isAllocatingBufferCon_.notify_all();
    if (ret != GSERROR_OK) {
        BLOGE("Alloc failed, sequence:%{public}u, ret:%{public}d, uniqueId: %{public}" PRIu64 ".",
            sequence, ret, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }

    bufferImpl->SetSurfaceBufferScalingMode(scalingMode);
    BufferElement ele = {
        .buffer = bufferImpl,
        .state = BUFFER_STATE_REQUESTED,
        .isDeleting = false,
        .config = config,
        .fence = SyncFence::InvalidFence(),
    };

    if (config.usage & BUFFER_USAGE_PROTECTED) {
        BLOGD("usage is BUFFER_USAGE_PROTECTED, uniqueId: %{public}" PRIu64 ".", uniqueId_);
        bufferQueueCache_[sequence] = ele;
        buffer = bufferImpl;
        return SURFACE_ERROR_OK;
    }

    ret = bufferImpl->Map();
    if (ret == GSERROR_OK) {
        bufferQueueCache_[sequence] = ele;
        buffer = bufferImpl;
    } else {
        BLOGE("Map failed, seq:%{public}u, ret:%{public}d, uniqueId: %{public}" PRIu64 ".",
            sequence, ret, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }

    BufferHandle* bufferHandle = bufferImpl->GetBufferHandle();
    if (connectedPid != 0 && bufferHandle != nullptr) {
        ioctl(bufferHandle->fd, DMA_BUF_SET_NAME_A, std::to_string(connectedPid).c_str());
    }

    return SURFACE_ERROR_OK;
}

void BufferQueue::OnBufferDeleteForRS(uint32_t sequence)
{
    auto buffer = bufferQueueCache_[sequence].buffer;
    if (buffer) {
        buffer->SetBufferDeleteFromCacheFlag(true);
    }
    if (onBufferDeleteForRSMainThread_ != nullptr) {
        onBufferDeleteForRSMainThread_(sequence);
    }
    if (onBufferDeleteForRSHardwareThread_ != nullptr) {
        onBufferDeleteForRSHardwareThread_(sequence);
    }
}

void BufferQueue::DeleteBufferInCacheNoWaitForAllocatingState(uint32_t sequence)
{
    auto it = bufferQueueCache_.find(sequence);
    if (it != bufferQueueCache_.end()) {
        OnBufferDeleteForRS(sequence);
        BLOGD("DeleteBufferInCache seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        bufferQueueCache_.erase(it);
        deletingList_.push_back(sequence);
    }
}

void BufferQueue::DeleteBufferInCache(uint32_t sequence, std::unique_lock<std::mutex> &lock)
{
    isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
    DeleteBufferInCacheNoWaitForAllocatingState(sequence);
}

uint32_t BufferQueue::GetQueueSize()
{
    std::unique_lock<std::mutex> lock(mutex_);
    return bufferQueueSize_;
}

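// Shrinks the cache by up to `count` buffers: free buffers are deleted first, then dirty
// (not yet acquired) buffers, and any remainder is marked isDeleting so it is removed once
// the buffer is handed back.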
void BufferQueue::DeleteBuffersLocked(int32_t count, std::unique_lock<std::mutex> &lock)
{
    SURFACE_TRACE_NAME_FMT("DeleteBuffersLocked count: %d", count);
    if (count <= 0) {
        return;
    }

    isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
    while (!freeList_.empty()) {
        DeleteBufferInCacheNoWaitForAllocatingState(freeList_.front());
        freeList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    while (!dirtyList_.empty()) {
        DeleteBufferInCacheNoWaitForAllocatingState(dirtyList_.front());
        dirtyList_.pop_front();
        count--;
        if (count <= 0) {
            return;
        }
    }

    for (auto&& ele : bufferQueueCache_) {
        ele.second.isDeleting = true;
        // nothing more to do here; the buffer is removed from the cache when it is next flushed or released
        count--;
        if (count <= 0) {
            break;
        }
    }
}

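// Transitions a cached buffer to ATTACHED. If the buffer is not yet released, waits up to
// timeOut milliseconds for it to be released; on success the sequence is also removed from
// the free list so it cannot be handed out by RequestBuffer.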
GSError BufferQueue::AttachBufferUpdateStatus(std::unique_lock<std::mutex> &lock, uint32_t sequence, int32_t timeOut)
{
    BufferState state = bufferQueueCache_[sequence].state;
    if (state == BUFFER_STATE_RELEASED) {
        bufferQueueCache_[sequence].state = BUFFER_STATE_ATTACHED;
    } else {
        waitAttachCon_.wait_for(lock, std::chrono::milliseconds(timeOut),
            [this, sequence]() { return (bufferQueueCache_[sequence].state == BUFFER_STATE_RELEASED); });
        if (bufferQueueCache_[sequence].state == BUFFER_STATE_RELEASED) {
            bufferQueueCache_[sequence].state = BUFFER_STATE_ATTACHED;
        } else {
            BLOGN_FAILURE_RET(SURFACE_ERROR_BUFFER_STATE_INVALID);
        }
    }

    for (auto iter = freeList_.begin(); iter != freeList_.end(); iter++) {
        if (sequence == *iter) {
            freeList_.erase(iter);
            break;
        }
    }
    return GSERROR_OK;
}

void BufferQueue::AttachBufferUpdateBufferInfo(sptr<SurfaceBuffer>& buffer, bool needMap)
{
    if (needMap) {
        buffer->Map();
    }
    buffer->SetSurfaceBufferWidth(buffer->GetWidth());
    buffer->SetSurfaceBufferHeight(buffer->GetHeight());
}

GSError BufferQueue::AttachBufferToQueueLocked(sptr<SurfaceBuffer> buffer, InvokerType invokerType, bool needMap)
{
    uint32_t sequence = buffer->GetSeqNum();
    if (GetUsedSize() >= bufferQueueSize_) {
        BLOGE("seq: %{public}u, buffer queue size:%{public}u, used size:%{public}u,"
            "uniqueId: %{public}" PRIu64 ".", sequence, bufferQueueSize_, GetUsedSize(), uniqueId_);
        return SURFACE_ERROR_BUFFER_QUEUE_FULL;
    }
    if (bufferQueueCache_.find(sequence) != bufferQueueCache_.end()) {
        BLOGE("seq: %{public}u, buffer is already in cache, uniqueId: %{public}" PRIu64 ".",
            sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_IS_INCACHE;
    }
    buffer->SetSurfaceBufferScalingMode(scalingMode_);
    BufferElement ele;
    ele = {
        .buffer = buffer,
        .isDeleting = false,
        .config = buffer->GetBufferRequestConfig(),
        .fence = SyncFence::InvalidFence(),
    };
    if (invokerType == InvokerType::PRODUCER_INVOKER) {
        ele.state = BUFFER_STATE_REQUESTED;
    } else {
        ele.state = BUFFER_STATE_ACQUIRED;
        if (detachReserveSlotNum_ > 0) {
            detachReserveSlotNum_--;
        }
    }
    AttachBufferUpdateBufferInfo(buffer, needMap);
    bufferQueueCache_[sequence] = ele;
    return GSERROR_OK;
}

GSError BufferQueue::AttachBufferToQueue(sptr<SurfaceBuffer> buffer, InvokerType invokerType)
{
    SURFACE_TRACE_NAME_FMT("AttachBufferToQueue name: %s queueId: %" PRIu64 " sequence: %u invokerType: %u",
        name_.c_str(), uniqueId_, buffer->GetSeqNum(), invokerType);
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return AttachBufferToQueueLocked(buffer, invokerType, true);
}

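// Removes a buffer from the cache without destroying the buffer itself. A producer may only
// detach a buffer it has requested; a consumer may only detach a buffer it has acquired, and
// can optionally reserve the freed slot so RequestBuffer cannot reuse it until re-attach.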
GSError BufferQueue::DetachBufferFromQueueLocked(uint32_t sequence, InvokerType invokerType,
    std::unique_lock<std::mutex> &lock, bool isReserveSlot)
{
    if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
        BLOGE("seq: %{public}u, not find in cache, uniqueId: %{public}" PRIu64 ".",
            sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }
    if (invokerType == InvokerType::PRODUCER_INVOKER) {
        if (bufferQueueCache_[sequence].state != BUFFER_STATE_REQUESTED) {
            BLOGE("seq: %{public}u, state: %{public}d, uniqueId: %{public}" PRIu64 ".",
                sequence, bufferQueueCache_[sequence].state, uniqueId_);
            return SURFACE_ERROR_BUFFER_STATE_INVALID;
        }
        OnBufferDeleteForRS(sequence);
        bufferQueueCache_.erase(sequence);
    } else {
        if (bufferQueueCache_[sequence].state != BUFFER_STATE_ACQUIRED) {
            BLOGE("seq: %{public}u, state: %{public}d, uniqueId: %{public}" PRIu64 ".",
                sequence, bufferQueueCache_[sequence].state, uniqueId_);
            return SURFACE_ERROR_BUFFER_STATE_INVALID;
        }
        DeleteBufferInCache(sequence, lock);
        if (isReserveSlot) {
            detachReserveSlotNum_++;
        }
    }
    return GSERROR_OK;
}

GSError BufferQueue::DetachBufferFromQueue(sptr<SurfaceBuffer> buffer, InvokerType invokerType, bool isReserveSlot)
{
    SURFACE_TRACE_NAME_FMT("DetachBufferFromQueue name: %s queueId: %" PRIu64 ""
        "sequence: %u invokerType: %u isReserveSlot: %u",
        name_.c_str(), uniqueId_, buffer->GetSeqNum(), invokerType, isReserveSlot);
    std::unique_lock<std::mutex> lock(mutex_);
    uint32_t sequence = buffer->GetSeqNum();
    auto ret = DetachBufferFromQueueLocked(sequence, invokerType, lock, isReserveSlot);
    if (ret != GSERROR_OK) {
        return ret;
    }

    return GSERROR_OK;
}

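// Attaches an externally created buffer to this queue in the ATTACHED state. If the buffer is
// already cached, its state is updated instead. When the queue is full, enough releasable
// (free or dirty) buffers are deleted to make room; otherwise GSERROR_OUT_OF_RANGE is returned.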
AttachBuffer(sptr<SurfaceBuffer> & buffer,int32_t timeOut)1250 GSError BufferQueue::AttachBuffer(sptr<SurfaceBuffer> &buffer, int32_t timeOut)
1251 {
1252 SURFACE_TRACE_NAME_FMT("%s", __func__);
1253 {
1254 std::lock_guard<std::mutex> lockGuard(mutex_);
1255 if (!GetStatusLocked()) {
1256 BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
1257 }
1258 }
1259 {
1260 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1261 if (listener_ == nullptr && listenerClazz_ == nullptr) {
1262 BLOGN_FAILURE_RET(SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER);
1263 }
1264 }
1265
1266 if (buffer == nullptr) {
1267 BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
1268 }
1269
1270 uint32_t sequence = buffer->GetSeqNum();
1271 std::unique_lock<std::mutex> lock(mutex_);
1272 if (bufferQueueCache_.find(sequence) != bufferQueueCache_.end()) {
1273 return AttachBufferUpdateStatus(lock, sequence, timeOut);
1274 }
1275
1276 buffer->SetSurfaceBufferScalingMode(scalingMode_);
1277 BufferElement ele = {
1278 .buffer = buffer,
1279 .state = BUFFER_STATE_ATTACHED,
1280 .config = {
1281 .width = buffer->GetWidth(), .height = buffer->GetHeight(), .strideAlignment = 0x8,
1282 .format = buffer->GetFormat(), .usage = buffer->GetUsage(), .timeout = timeOut,
1283 },
1284 .damages = { { .w = buffer->GetWidth(), .h = buffer->GetHeight(), } },
1285 };
1286 AttachBufferUpdateBufferInfo(buffer, true);
1287 int32_t usedSize = static_cast<int32_t>(GetUsedSize());
1288 int32_t queueSize = static_cast<int32_t>(bufferQueueSize_);
1289 if (usedSize >= queueSize) {
1290 int32_t freeSize = static_cast<int32_t>(dirtyList_.size() + freeList_.size());
1291 if (freeSize >= usedSize - queueSize + 1) {
1292 DeleteBuffersLocked(usedSize - queueSize + 1, lock);
1293 bufferQueueCache_[sequence] = ele;
1294 return GSERROR_OK;
1295 } else {
1296 BLOGN_FAILURE_RET(GSERROR_OUT_OF_RANGE);
1297 }
1298 } else {
1299 bufferQueueCache_[sequence] = ele;
1300 return GSERROR_OK;
1301 }
1302 }
1303
DetachBuffer(sptr<SurfaceBuffer> & buffer)1304 GSError BufferQueue::DetachBuffer(sptr<SurfaceBuffer> &buffer)
1305 {
1306 SURFACE_TRACE_NAME_FMT("%s", __func__);
1307 if (buffer == nullptr) {
1308 BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
1309 }
1310
1311 std::lock_guard<std::mutex> lockGuard(mutex_);
1312 uint32_t sequence = buffer->GetSeqNum();
1313 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1314 return GSERROR_NO_ENTRY;
1315 }
1316
1317 if (bufferQueueCache_[sequence].state == BUFFER_STATE_REQUESTED) {
1318 BLOGD("DetachBuffer requested seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
1319 } else if (bufferQueueCache_[sequence].state == BUFFER_STATE_ACQUIRED) {
1320 BLOGD("DetachBuffer acquired seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
1321 } else {
1322 BLOGE("DetachBuffer invalid state: %{public}d, seq: %{public}u, uniqueId: %{public}" PRIu64 ".",
1323 bufferQueueCache_[sequence].state, sequence, uniqueId_);
1324 return GSERROR_NO_ENTRY;
1325 }
1326 OnBufferDeleteForRS(sequence);
1327 bufferQueueCache_.erase(sequence);
1328 return GSERROR_OK;
1329 }
1330
RegisterSurfaceDelegator(sptr<IRemoteObject> client,sptr<Surface> cSurface)1331 GSError BufferQueue::RegisterSurfaceDelegator(sptr<IRemoteObject> client, sptr<Surface> cSurface)
1332 {
1333 sptr<ConsumerSurfaceDelegator> surfaceDelegator = ConsumerSurfaceDelegator::Create();
1334 if (surfaceDelegator == nullptr) {
1335 BLOGE("Failed to register consumer delegator because the surface delegator is nullptr");
1336 return GSERROR_INVALID_ARGUMENTS;
1337 }
1338 if (!surfaceDelegator->SetClient(client)) {
1339 BLOGE("Failed to set client");
1340 return GSERROR_INVALID_ARGUMENTS;
1341 }
1342 if (!surfaceDelegator->SetBufferQueue(this)) {
1343 BLOGE("Failed to set bufferqueue");
1344 return GSERROR_INVALID_ARGUMENTS;
1345 }
1346
1347 surfaceDelegator->SetSurface(cSurface);
1348 wpCSurfaceDelegator_ = surfaceDelegator;
1349 return GSERROR_OK;
1350 }
1351
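/**
 * @brief Resize the queue. Shrinking frees surplus cached buffers; growing notifies
 *        threads blocked waiting for a free buffer slot.
 */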
1352 GSError BufferQueue::SetQueueSize(uint32_t queueSize)
1353 {
1354 if (queueSize == 0) {
1355 BLOGW("queue size: %{public}u, uniqueId: %{public}" PRIu64 ".", queueSize, uniqueId_);
1356 return GSERROR_INVALID_ARGUMENTS;
1357 }
1358
1359 if (queueSize > SURFACE_MAX_QUEUE_SIZE) {
1360 BLOGW("invalid queueSize: %{public}u, uniqueId: %{public}" PRIu64 ".",
1361 queueSize, uniqueId_);
1362 return GSERROR_INVALID_ARGUMENTS;
1363 }
1364
1365 std::unique_lock<std::mutex> lock(mutex_);
1366 if (queueSize < detachReserveSlotNum_) {
1367 BLOGW("invalid queueSize: %{public}u, reserveSlotNum: %{public}u, uniqueId: %{public}" PRIu64 ".",
1368 queueSize, detachReserveSlotNum_, uniqueId_);
1369 return GSERROR_INVALID_ARGUMENTS;
1370 }
1371 if (bufferQueueSize_ > queueSize) {
1372 DeleteBuffersLocked(bufferQueueSize_ - queueSize, lock);
1373 }
1374 // If the queue size is increased, wake up any threads blocked waiting for a free buffer.
1375 if (queueSize > bufferQueueSize_) {
1376 bufferQueueSize_ = queueSize;
1377 waitReqCon_.notify_all();
1378 } else {
1379 bufferQueueSize_ = queueSize;
1380 }
1381
1382 return GSERROR_OK;
1383 }
1384
1385 GSError BufferQueue::GetName(std::string &name)
1386 {
1387 name = name_;
1388 return GSERROR_OK;
1389 }
1390
1391 GSError BufferQueue::RegisterConsumerListener(sptr<IBufferConsumerListener> &listener)
1392 {
1393 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1394 listener_ = listener;
1395 return GSERROR_OK;
1396 }
1397
1398 GSError BufferQueue::RegisterConsumerListener(IBufferConsumerListenerClazz *listener)
1399 {
1400 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1401 listenerClazz_ = listener;
1402 return GSERROR_OK;
1403 }
1404
1405 GSError BufferQueue::UnregisterConsumerListener()
1406 {
1407 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1408 listener_ = nullptr;
1409 listenerClazz_ = nullptr;
1410 return GSERROR_OK;
1411 }
1412
1413 GSError BufferQueue::RegisterReleaseListener(OnReleaseFunc func)
1414 {
1415 std::lock_guard<std::mutex> lockGuard(onBufferReleaseMutex_);
1416 onBufferRelease_ = func;
1417 return GSERROR_OK;
1418 }
1419
1420 GSError BufferQueue::RegisterProducerReleaseListener(sptr<IProducerListener> listener)
1421 {
1422 std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1423 producerListener_ = listener;
1424 return GSERROR_OK;
1425 }
1426
1427 GSError BufferQueue::RegisterProducerPropertyListener(sptr<IProducerListener> listener, uint64_t producerId)
1428 {
1429 std::lock_guard<std::mutex> lockGuard(propertyChangeMutex_);
1430 return BufferUtilRegisterPropertyListener(listener, producerId, propertyChangeListeners_);
1431 }
1432
1433 GSError BufferQueue::UnRegisterProducerPropertyListener(uint64_t producerId)
1434 {
1435 std::lock_guard<std::mutex> lockGuard(propertyChangeMutex_);
1436 return BufferUtilUnRegisterPropertyListener(producerId, propertyChangeListeners_);
1437 }
1438
1439 GSError BufferQueue::RegisterProducerReleaseListenerBackup(sptr<IProducerListener> listener)
1440 {
1441 std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1442 producerListenerBackup_ = listener;
1443 return GSERROR_OK;
1444 }
1445
1446 GSError BufferQueue::UnRegisterProducerReleaseListener()
1447 {
1448 std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1449 producerListener_ = nullptr;
1450 return GSERROR_OK;
1451 }
1452
1453 GSError BufferQueue::UnRegisterProducerReleaseListenerBackup()
1454 {
1455 std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1456 producerListenerBackup_ = nullptr;
1457 return GSERROR_OK;
1458 }
1459
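/**
 * @brief Register a buffer-delete callback for either the RS hardware thread
 *        (isForUniRedraw) or the RS main thread; the first registration wins and
 *        later calls are silently ignored.
 */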
1460 GSError BufferQueue::RegisterDeleteBufferListener(OnDeleteBufferFunc func, bool isForUniRedraw)
1461 {
1462 std::lock_guard<std::mutex> lockGuard(mutex_);
1463 if (isForUniRedraw) {
1464 if (onBufferDeleteForRSHardwareThread_ != nullptr) {
1465 return GSERROR_OK;
1466 }
1467 onBufferDeleteForRSHardwareThread_ = func;
1468 } else {
1469 if (onBufferDeleteForRSMainThread_ != nullptr) {
1470 return GSERROR_OK;
1471 }
1472 onBufferDeleteForRSMainThread_ = func;
1473 }
1474 return GSERROR_OK;
1475 }
1476
1477 GSError BufferQueue::SetDefaultWidthAndHeight(int32_t width, int32_t height)
1478 {
1479 if (width <= 0) {
1480 BLOGW("width is %{public}d, uniqueId: %{public}" PRIu64 ".", width, uniqueId_);
1481 return GSERROR_INVALID_ARGUMENTS;
1482 }
1483
1484 if (height <= 0) {
1485 BLOGW("height is %{public}d, uniqueId: %{public}" PRIu64 ".", height, uniqueId_);
1486 return GSERROR_INVALID_ARGUMENTS;
1487 }
1488 std::lock_guard<std::mutex> lockGuard(mutex_);
1489 defaultWidth_ = width;
1490 defaultHeight_ = height;
1491 return GSERROR_OK;
1492 }
1493
1494 int32_t BufferQueue::GetDefaultWidth()
1495 {
1496 std::lock_guard<std::mutex> lockGuard(mutex_);
1497 return defaultWidth_;
1498 }
1499
1500 int32_t BufferQueue::GetDefaultHeight()
1501 {
1502 std::lock_guard<std::mutex> lockGuard(mutex_);
1503 return defaultHeight_;
1504 }
1505
1506 GSError BufferQueue::SetDefaultUsage(uint64_t usage)
1507 {
1508 std::lock_guard<std::mutex> lockGuard(mutex_);
1509 defaultUsage_ = usage;
1510 return GSERROR_OK;
1511 }
1512
1513 uint64_t BufferQueue::GetDefaultUsage()
1514 {
1515 std::lock_guard<std::mutex> lockGuard(mutex_);
1516 return defaultUsage_;
1517 }
1518
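/**
 * @brief Drop every cached buffer and clear the free, dirty and deleting lists.
 *        Waits for any in-flight allocation to finish; the caller must already hold
 *        mutex_ through the passed-in lock.
 */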
1519 void BufferQueue::ClearLocked(std::unique_lock<std::mutex> &lock)
1520 {
1521 isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
1522 for (auto &[id, _] : bufferQueueCache_) {
1523 OnBufferDeleteForRS(id);
1524 }
1525 bufferQueueCache_.clear();
1526 freeList_.clear();
1527 dirtyList_.clear();
1528 deletingList_.clear();
1529 }
1530
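/**
 * @brief Notify the consumer that the surface goes to background, then clear the
 *        buffer cache, wake up waiting producers and reset the producer cache-clean flag.
 */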
1531 GSError BufferQueue::GoBackground()
1532 {
1533 sptr<IBufferConsumerListener> listener;
1534 IBufferConsumerListenerClazz *listenerClazz;
1535 {
1536 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1537 listener = listener_;
1538 listenerClazz = listenerClazz_;
1539 }
1540 if (listener != nullptr) {
1541 SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1542 listener->OnGoBackground();
1543 } else if (listenerClazz != nullptr) {
1544 SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1545 listenerClazz->OnGoBackground();
1546 }
1547 std::unique_lock<std::mutex> lock(mutex_);
1548 ClearLocked(lock);
1549 waitReqCon_.notify_all();
1550 SetProducerCacheCleanFlagLocked(false, lock);
1551 return GSERROR_OK;
1552 }
1553
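/**
 * @brief Clear the buffer cache. With cleanAll the consumer is notified via
 *        OnGoBackground; otherwise OnCleanCache is called with the optional sequence.
 */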
1554 GSError BufferQueue::CleanCache(bool cleanAll, uint32_t *bufSeqNum)
1555 {
1556 sptr<IBufferConsumerListener> listener;
1557 IBufferConsumerListenerClazz *listenerClazz;
1558 {
1559 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1560 listener = listener_;
1561 listenerClazz = listenerClazz_;
1562 }
1563 if (cleanAll) {
1564 if (listener != nullptr) {
1565 SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1566 listener->OnGoBackground();
1567 } else if (listenerClazz != nullptr) {
1568 SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1569 listenerClazz->OnGoBackground();
1570 }
1571 } else {
1572 if (listener != nullptr) {
1573 SURFACE_TRACE_NAME_FMT("OnCleanCache name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1574 listener->OnCleanCache(bufSeqNum);
1575 } else if (listenerClazz != nullptr) {
1576 SURFACE_TRACE_NAME_FMT("OnCleanCache name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1577 listenerClazz->OnCleanCache(bufSeqNum);
1578 }
1579 }
1580 std::unique_lock<std::mutex> lock(mutex_);
1581 ClearLocked(lock);
1582 waitReqCon_.notify_all();
1583 return GSERROR_OK;
1584 }
1585
1586 GSError BufferQueue::OnConsumerDied()
1587 {
1588 std::unique_lock<std::mutex> lock(mutex_);
1589 ClearLocked(lock);
1590 waitReqCon_.notify_all();
1591 return GSERROR_OK;
1592 }
1593
1594 GSError BufferQueue::IsSurfaceBufferInCache(uint32_t seqNum, bool &isInCache)
1595 {
1596 std::unique_lock<std::mutex> lock(mutex_);
1597 if (bufferQueueCache_.find(seqNum) != bufferQueueCache_.end()) {
1598 isInCache = true;
1599 } else {
1600 isInCache = false;
1601 }
1602 return GSERROR_OK;
1603 }
1604
1605 uint64_t BufferQueue::GetUniqueId() const
1606 {
1607 std::unique_lock<std::mutex> lock(mutex_);
1608 return uniqueId_;
1609 }
1610
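/**
 * @brief Update the surface transform and, only when the value actually changes,
 *        notify the consumer through OnTransformChange.
 */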
1611 GSError BufferQueue::SetTransform(GraphicTransformType transform)
1612 {
1613 {
1614 std::unique_lock<std::mutex> lock(mutex_);
1615 if (transform_ == transform) {
1616 return GSERROR_OK;
1617 }
1618
1619 transform_ = transform;
1620 }
1621 sptr<IBufferConsumerListener> listener;
1622 IBufferConsumerListenerClazz *listenerClazz;
1623 {
1624 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1625 listener = listener_;
1626 listenerClazz = listenerClazz_;
1627 }
1628 if (listener != nullptr) {
1629 SURFACE_TRACE_NAME_FMT("OnTransformChange transform: %u", transform);
1630 listener->OnTransformChange();
1631 } else if (listenerClazz != nullptr) {
1632 SURFACE_TRACE_NAME_FMT("OnTransformChange transform: %u", transform);
1633 listenerClazz->OnTransformChange();
1634 }
1635 return GSERROR_OK;
1636 }
1637
1638 GraphicTransformType BufferQueue::GetTransform() const
1639 {
1640 std::unique_lock<std::mutex> lock(mutex_);
1641 return transform_;
1642 }
1643
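/**
 * @brief Update the transform hint and broadcast the new value to every registered
 *        producer property listener except the producer that issued the change.
 */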
1644 GSError BufferQueue::SetTransformHint(GraphicTransformType transformHint, uint64_t producerId)
1645 {
1646 {
1647 std::unique_lock<std::mutex> lock(mutex_);
1648 if (transformHint_ != transformHint) {
1649 transformHint_ = transformHint;
1650 } else {
1651 return GSERROR_OK;
1652 }
1653 }
1654
1655 std::map<uint64_t, sptr<IProducerListener>> propertyListeners;
1656 {
1657 std::lock_guard<std::mutex> lockGuard(propertyChangeMutex_);
1658 if (propertyChangeListeners_.empty()) {
1659 return GSERROR_OK;
1660 }
1661 propertyListeners = propertyChangeListeners_;
1662 }
1663 SurfaceProperty property = {
1664 .transformHint = transformHint,
1665 };
1666 for (const auto& item: propertyListeners) {
1667 SURFACE_TRACE_NAME_FMT("propertyListeners %u, val %d", item.first, (int)property.transformHint);
1668 if (producerId == item.first) {
1669 continue;
1670 }
1671 if (item.second->OnPropertyChange(property) != GSERROR_OK) {
1672 BLOGE("OnPropertyChange failed, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1673 }
1674 }
1675 return GSERROR_OK;
1676 }
1677
1678 GraphicTransformType BufferQueue::GetTransformHint() const
1679 {
1680 std::unique_lock<std::mutex> lock(mutex_);
1681 return transformHint_;
1682 }
1683
1684 GSError BufferQueue::SetSurfaceSourceType(OHSurfaceSource sourceType)
1685 {
1686 std::unique_lock<std::mutex> lock(mutex_);
1687 sourceType_ = sourceType;
1688 return GSERROR_OK;
1689 }
1690
1691 OHSurfaceSource BufferQueue::GetSurfaceSourceType() const
1692 {
1693 std::unique_lock<std::mutex> lock(mutex_);
1694 return sourceType_;
1695 }
1696
1697 GSError BufferQueue::SetHdrWhitePointBrightness(float brightness)
1698 {
1699 std::unique_lock<std::mutex> lock(mutex_);
1700 hdrWhitePointBrightness_ = brightness;
1701 return GSERROR_OK;
1702 }
1703
1704 GSError BufferQueue::SetSdrWhitePointBrightness(float brightness)
1705 {
1706 std::unique_lock<std::mutex> lock(mutex_);
1707 sdrWhitePointBrightness_ = brightness;
1708 return GSERROR_OK;
1709 }
1710
1711 float BufferQueue::GetHdrWhitePointBrightness() const
1712 {
1713 std::unique_lock<std::mutex> lock(mutex_);
1714 return hdrWhitePointBrightness_;
1715 }
1716
1717 float BufferQueue::GetSdrWhitePointBrightness() const
1718 {
1719 std::unique_lock<std::mutex> lock(mutex_);
1720 return sdrWhitePointBrightness_;
1721 }
1722
1723 GSError BufferQueue::SetSurfaceAppFrameworkType(std::string appFrameworkType)
1724 {
1725 if (appFrameworkType.empty()) {
1726 return GSERROR_NO_ENTRY;
1727 }
1728 if (appFrameworkType.size() > MAXIMUM_LENGTH_OF_APP_FRAMEWORK) {
1729 return GSERROR_OUT_OF_RANGE;
1730 }
1731 std::unique_lock<std::mutex> lock(mutex_);
1732 appFrameworkType_ = appFrameworkType;
1733 return GSERROR_OK;
1734 }
1735
1736 std::string BufferQueue::GetSurfaceAppFrameworkType() const
1737 {
1738 std::unique_lock<std::mutex> lock(mutex_);
1739 return appFrameworkType_;
1740 }
1741
1742 GSError BufferQueue::SetBufferHold(bool hold)
1743 {
1744 std::unique_lock<std::mutex> lock(mutex_);
1745 isBufferHold_ = hold;
1746 return GSERROR_OK;
1747 }
1748
1749 GSError BufferQueue::SetBufferName(const std::string &bufferName)
1750 {
1751 std::unique_lock<std::mutex> lock(mutex_);
1752 bufferName_ = bufferName;
1753 return GSERROR_OK;
1754 }
1755
1756 GSError BufferQueue::SetScalingMode(uint32_t sequence, ScalingMode scalingMode)
1757 {
1758 std::lock_guard<std::mutex> lockGuard(mutex_);
1759 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1760 return GSERROR_NO_ENTRY;
1761 }
1762 bufferQueueCache_[sequence].buffer->SetSurfaceBufferScalingMode(scalingMode);
1763 return GSERROR_OK;
1764 }
1765
1766 GSError BufferQueue::SetScalingMode(ScalingMode scalingMode)
1767 {
1768 std::lock_guard<std::mutex> lockGuard(mutex_);
1769 for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
1770 it->second.buffer->SetSurfaceBufferScalingMode(scalingMode);
1771 }
1772 scalingMode_ = scalingMode;
1773 return GSERROR_OK;
1774 }
1775
1776 GSError BufferQueue::GetScalingMode(uint32_t sequence, ScalingMode &scalingMode)
1777 {
1778 std::lock_guard<std::mutex> lockGuard(mutex_);
1779 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1780 return GSERROR_NO_ENTRY;
1781 }
1782 scalingMode = bufferQueueCache_.at(sequence).buffer->GetSurfaceBufferScalingMode();
1783 return GSERROR_OK;
1784 }
1785
1786 GSError BufferQueue::SetMetaData(uint32_t sequence, const std::vector<GraphicHDRMetaData> &metaData)
1787 {
1788 std::lock_guard<std::mutex> lockGuard(mutex_);
1789 if (metaData.size() == 0) {
1790 BLOGW("metaData size is 0, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1791 return GSERROR_INVALID_ARGUMENTS;
1792 }
1793 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1794 return GSERROR_NO_ENTRY;
1795 }
1796 bufferQueueCache_[sequence].metaData.clear();
1797 bufferQueueCache_[sequence].metaData = metaData;
1798 bufferQueueCache_[sequence].hdrMetaDataType = HDRMetaDataType::HDR_META_DATA;
1799 return GSERROR_OK;
1800 }
1801
1802 GSError BufferQueue::SetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey key,
1803 const std::vector<uint8_t> &metaData)
1804 {
1805 std::lock_guard<std::mutex> lockGuard(mutex_);
1806 if (key < GraphicHDRMetadataKey::GRAPHIC_MATAKEY_RED_PRIMARY_X ||
1807 key > GraphicHDRMetadataKey::GRAPHIC_MATAKEY_HDR_VIVID) {
1808 BLOGW("key is %{public}d, uniqueId: %{public}" PRIu64 ".", key, uniqueId_);
1809 return GSERROR_INVALID_ARGUMENTS;
1810 }
1811 if (metaData.size() == 0) {
1812 BLOGW("metaData size is 0, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1813 return GSERROR_INVALID_ARGUMENTS;
1814 }
1815 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1816 return GSERROR_NO_ENTRY;
1817 }
1818 bufferQueueCache_[sequence].metaDataSet.clear();
1819 bufferQueueCache_[sequence].key = key;
1820 bufferQueueCache_[sequence].metaDataSet = metaData;
1821 bufferQueueCache_[sequence].hdrMetaDataType = HDRMetaDataType::HDR_META_DATA_SET;
1822 return GSERROR_OK;
1823 }
1824
1825 GSError BufferQueue::QueryMetaDataType(uint32_t sequence, HDRMetaDataType &type)
1826 {
1827 std::lock_guard<std::mutex> lockGuard(mutex_);
1828 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1829 return GSERROR_NO_ENTRY;
1830 }
1831 type = bufferQueueCache_.at(sequence).hdrMetaDataType;
1832 return GSERROR_OK;
1833 }
1834
1835 GSError BufferQueue::GetMetaData(uint32_t sequence, std::vector<GraphicHDRMetaData> &metaData)
1836 {
1837 std::lock_guard<std::mutex> lockGuard(mutex_);
1838 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1839 return GSERROR_NO_ENTRY;
1840 }
1841 metaData.clear();
1842 metaData = bufferQueueCache_.at(sequence).metaData;
1843 return GSERROR_OK;
1844 }
1845
1846 GSError BufferQueue::GetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey &key,
1847 std::vector<uint8_t> &metaData)
1848 {
1849 std::lock_guard<std::mutex> lockGuard(mutex_);
1850 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1851 return GSERROR_NO_ENTRY;
1852 }
1853 metaData.clear();
1854 key = bufferQueueCache_.at(sequence).key;
1855 metaData = bufferQueueCache_.at(sequence).metaDataSet;
1856 return GSERROR_OK;
1857 }
1858
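/**
 * @brief Replace the tunnel handle and notify the consumer via OnTunnelHandleChange.
 *        Passing a handle identical to the current one returns GSERROR_NO_ENTRY.
 */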
1859 GSError BufferQueue::SetTunnelHandle(const sptr<SurfaceTunnelHandle> &handle)
1860 {
1861 std::lock_guard<std::mutex> lockGuard(mutex_);
1862 bool tunnelHandleChange = false;
1863 if (tunnelHandle_ == nullptr) {
1864 if (handle == nullptr) {
1865 BLOGW("tunnel handle is nullptr, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1866 return GSERROR_INVALID_ARGUMENTS;
1867 }
1868 tunnelHandleChange = true;
1869 } else {
1870 tunnelHandleChange = tunnelHandle_->Different(handle);
1871 }
1872 if (!tunnelHandleChange) {
1873 BLOGW("same tunnel handle, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1874 return GSERROR_NO_ENTRY;
1875 }
1876 tunnelHandle_ = handle;
1877 sptr<IBufferConsumerListener> listener;
1878 IBufferConsumerListenerClazz *listenerClazz;
1879 {
1880 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1881 listener = listener_;
1882 listenerClazz = listenerClazz_;
1883 }
1884 if (listener != nullptr) {
1885 SURFACE_TRACE_NAME("OnTunnelHandleChange");
1886 listener->OnTunnelHandleChange();
1887 } else if (listenerClazz != nullptr) {
1888 SURFACE_TRACE_NAME("OnTunnelHandleChange");
1889 listenerClazz->OnTunnelHandleChange();
1890 } else {
1891 return SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER;
1892 }
1893 return GSERROR_OK;
1894 }
1895
1896 sptr<SurfaceTunnelHandle> BufferQueue::GetTunnelHandle()
1897 {
1898 std::lock_guard<std::mutex> lockGuard(mutex_);
1899 return tunnelHandle_;
1900 }
1901
1902 GSError BufferQueue::SetPresentTimestamp(uint32_t sequence, const GraphicPresentTimestamp &timestamp)
1903 {
1904 std::lock_guard<std::mutex> lockGuard(mutex_);
1905 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1906 return GSERROR_NO_ENTRY;
1907 }
1908 bufferQueueCache_[sequence].presentTimestamp = timestamp;
1909 return GSERROR_OK;
1910 }
1911
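/**
 * @brief Read the present timestamp recorded for a buffer. DELAY returns the stored
 *        value directly; TIMESTAMP returns it relative to the buffer's own timestamp.
 */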
1912 GSError BufferQueue::GetPresentTimestamp(uint32_t sequence, GraphicPresentTimestampType type, int64_t &time)
1913 {
1914 std::lock_guard<std::mutex> lockGuard(mutex_);
1915 if (bufferQueueCache_.find(sequence) == bufferQueueCache_.end()) {
1916 return GSERROR_NO_ENTRY;
1917 }
1918 if (type != bufferQueueCache_.at(sequence).presentTimestamp.type) {
1919 BLOGE("seq: %{public}u, PresentTimestampType [%{public}d] is not supported, the supported type is [%{public}d],"
1920 "uniqueId: %{public}" PRIu64 ".", sequence, type,
1921 bufferQueueCache_.at(sequence).presentTimestamp.type, uniqueId_);
1922 return GSERROR_NO_ENTRY;
1923 }
1924 switch (type) {
1925 case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_DELAY: {
1926 time = bufferQueueCache_.at(sequence).presentTimestamp.time;
1927 return GSERROR_OK;
1928 }
1929 case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_TIMESTAMP: {
1930 time = bufferQueueCache_.at(sequence).presentTimestamp.time - bufferQueueCache_.at(sequence).timestamp;
1931 return GSERROR_OK;
1932 }
1933 default: {
1934 BLOGE("seq: %{public}u, unsupported type: %{public}d, uniqueId: %{public}" PRIu64 ".",
1935 sequence, type, uniqueId_);
1936 return GSERROR_TYPE_ERROR;
1937 }
1938 }
1939 }
1940
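/**
 * @brief Write the force-global-alpha value into the buffer's metadata, provided the
 *        value lies within the valid range checked below.
 */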
1941 void BufferQueue::SetSurfaceBufferGlobalAlphaUnlocked(sptr<SurfaceBuffer> buffer)
1942 {
1943 std::lock_guard<std::mutex> lockGuard(globalAlphaMutex_);
1944 if (globalAlpha_ < FORCE_GLOBAL_ALPHA_MIN || globalAlpha_ > FORCE_GLOBAL_ALPHA_MAX) {
1945 BLOGE("Invalid global alpha value: %{public}d, uniqueId: %{public}" PRIu64 ".", globalAlpha_, uniqueId_);
1946 return;
1947 }
1948 using namespace HDI::Display::Graphic::Common;
1949 V2_0::BufferHandleAttrKey key = V2_0::BufferHandleAttrKey::ATTRKEY_FORCE_GLOBAL_ALPHA;
1950 std::vector<uint8_t> values;
1951 auto ret = MetadataHelper::ConvertMetadataToVec(globalAlpha_, values);
1952 if (ret != GSERROR_OK) {
1953 BLOGE("Convert global alpha value failed, ret: %{public}d, value: %{public}d, uniqueId: %{public}" PRIu64 ".",
1954 ret, globalAlpha_, uniqueId_);
1955 return;
1956 }
1957 buffer->SetMetadata(key, values);
1958 }
1959
1960 GSError BufferQueue::SetGlobalAlpha(int32_t alpha)
1961 {
1962 std::lock_guard<std::mutex> lockGuard(globalAlphaMutex_);
1963 globalAlpha_ = alpha;
1964 return GSERROR_OK;
1965 }
1966
1967 GSError BufferQueue::GetGlobalAlpha(int32_t &alpha)
1968 {
1969 std::lock_guard<std::mutex> lockGuard(globalAlphaMutex_);
1970 alpha = globalAlpha_;
1971 return GSERROR_OK;
1972 }
1973
1974 void BufferQueue::DumpMetadata(std::string &result, BufferElement element)
1975 {
1976 HDI::Display::Graphic::Common::V1_0::CM_ColorSpaceType colorSpaceType;
1977 MetadataHelper::GetColorSpaceType(element.buffer, colorSpaceType);
1978 HDI::Display::Graphic::Common::V1_0::CM_HDR_Metadata_Type hdrMetadataType =
1979 HDI::Display::Graphic::Common::V1_0::CM_METADATA_NONE;
1980 std::vector<uint8_t> dataStatic;
1981 std::vector<uint8_t> dataDynamic;
1982 MetadataHelper::GetHDRDynamicMetadata(element.buffer, dataDynamic);
1983 MetadataHelper::GetHDRStaticMetadata(element.buffer, dataStatic);
1984 MetadataHelper::GetHDRMetadataType(element.buffer, hdrMetadataType);
1985 result += std::to_string(colorSpaceType) + ", ";
1986 result += " [staticMetadata: ";
1987 for (auto x : dataStatic) {
1988 result += std::to_string(x);
1989 result += " ";
1990 }
1991 result += " ],[dynamicMetadata: ";
1992 for (auto x : dataDynamic) {
1993 result += std::to_string(x);
1994 result += " ";
1995 }
1996 result += " ],[metadataType: ";
1997 result += std::to_string(hdrMetadataType) + "],";
1998 }
1999
2000 void BufferQueue::DumpCache(std::string &result)
2001 {
2002 for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
2003 BufferElement element = it->second;
2004 if (BufferStateStrs.find(element.state) != BufferStateStrs.end()) {
2005 result += " sequence = " + std::to_string(it->first) +
2006 ", state = " + std::to_string(element.state) +
2007 ", timestamp = " + std::to_string(element.timestamp);
2008 }
2009 for (decltype(element.damages.size()) i = 0; i < element.damages.size(); i++) {
2010 result += ", damagesRect = [" + std::to_string(i) + "] = [" +
2011 std::to_string(element.damages[i].x) + ", " +
2012 std::to_string(element.damages[i].y) + ", " +
2013 std::to_string(element.damages[i].w) + ", " +
2014 std::to_string(element.damages[i].h) + "],";
2015 }
2016 result += " config = [" + std::to_string(element.config.width) + "x" +
2017 std::to_string(element.config.height) + ", " +
2018 std::to_string(element.config.strideAlignment) + ", " +
2019 std::to_string(element.config.format) +", " +
2020 std::to_string(element.config.usage) + ", " +
2021 std::to_string(element.config.timeout) + ", " +
2022 std::to_string(element.config.colorGamut) + ", " +
2023 std::to_string(element.config.transform) + "],";
2024 DumpMetadata(result, element);
2025 result += " scalingMode = " + std::to_string(element.buffer->GetSurfaceBufferScalingMode()) + ",";
2026 result += " HDR = " + std::to_string(element.hdrMetaDataType) + ", ";
2027
2028 double bufferMemSize = 0;
2029 if (element.buffer != nullptr) {
2030 result += " bufferWith = " + std::to_string(element.buffer->GetWidth()) +
2031 ", bufferHeight = " + std::to_string(element.buffer->GetHeight());
2032 bufferMemSize = static_cast<double>(element.buffer->GetSize()) / BUFFER_MEMSIZE_RATE;
2033 }
2034
2035 std::ostringstream ss;
2036 ss.precision(BUFFER_MEMSIZE_FORMAT);
2037 ss.setf(std::ios::fixed);
2038 ss << bufferMemSize;
2039 std::string str = ss.str();
2040 result += ", bufferMemSize = " + str + "(KiB).\n";
2041 }
2042 }
2043
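/**
 * @brief Append this queue's configuration and per-buffer details to the dump output.
 *        When the incoming result ends with the "dumpend" marker, only the accumulated
 *        memory total of all dumped surfaces is emitted instead.
 */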
2044 void BufferQueue::Dump(std::string &result)
2045 {
2046 std::unique_lock<std::mutex> lock(mutex_);
2047 std::ostringstream ss;
2048 ss.precision(BUFFER_MEMSIZE_FORMAT);
2049 ss.setf(std::ios::fixed);
2050 static double allSurfacesMemSize = 0;
2051 uint64_t totalBufferListSize = 0;
2052 double memSizeInKB = 0;
2053
2054 isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
2055 for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
2056 BufferElement element = it->second;
2057 if (element.buffer != nullptr) {
2058 totalBufferListSize += element.buffer->GetSize();
2059 }
2060 }
2061 memSizeInKB = static_cast<double>(totalBufferListSize) / BUFFER_MEMSIZE_RATE;
2062
2063 allSurfacesMemSize += memSizeInKB;
2064 uint32_t resultLen = result.size();
2065 std::string dumpEndFlag = "dumpend";
2066 if (resultLen > dumpEndFlag.size() && resultLen > 1) {
2067 std::string dumpEndIn(result, resultLen - dumpEndFlag.size(), resultLen - 1);
2068 if (dumpEndIn == dumpEndFlag) {
2069 ss << allSurfacesMemSize;
2070 std::string dumpEndStr = ss.str();
2071 result.erase(resultLen - dumpEndFlag.size(), resultLen - 1);
2072 result += dumpEndStr + " KiB.\n";
2073 allSurfacesMemSize = 0;
2074 return;
2075 }
2076 }
2077
2078 ss.str("");
2079 ss << memSizeInKB;
2080 std::string str = ss.str();
2081 result.append("\nBufferQueue:\n");
2082 result += " default-size = [" + std::to_string(defaultWidth_) + "x" + std::to_string(defaultHeight_) + "]" +
2083 ", FIFO = " + std::to_string(bufferQueueSize_) +
2084 ", name = " + name_ +
2085 ", uniqueId = " + std::to_string(uniqueId_) +
2086 ", usedBufferListLen = " + std::to_string(GetUsedSize()) +
2087 ", freeBufferListLen = " + std::to_string(freeList_.size()) +
2088 ", dirtyBufferListLen = " + std::to_string(dirtyList_.size()) +
2089 ", totalBuffersMemSize = " + str + "(KiB)" +
2090 ", hdrWhitePointBrightness = " + std::to_string(hdrWhitePointBrightness_) +
2091 ", sdrWhitePointBrightness = " + std::to_string(sdrWhitePointBrightness_) +
2092 ", lockLastFlushedBuffer seq = " + std::to_string(acquireLastFlushedBufSequence_) + "\n";
2093
2094 result.append(" bufferQueueCache:\n");
2095 DumpCache(result);
2096 }
2097
2098 bool BufferQueue::GetStatusLocked() const
2099 {
2100 return isValidStatus_;
2101 }
2102
2103 bool BufferQueue::GetStatus() const
2104 {
2105 std::lock_guard<std::mutex> lockGuard(mutex_);
2106 return GetStatusLocked();
2107 }
2108
2109 void BufferQueue::SetStatus(bool status)
2110 {
2111 std::lock_guard<std::mutex> lockGuard(mutex_);
2112 isValidStatus_ = status;
2113 waitReqCon_.notify_all();
2114 }
2115
2116 uint32_t BufferQueue::GetAvailableBufferCount()
2117 {
2118 std::lock_guard<std::mutex> lockGuard(mutex_);
2119 return static_cast<uint32_t>(dirtyList_.size());
2120 }
2121
2122 void BufferQueue::SetConnectedPid(int32_t connectedPid)
2123 {
2124 connectedPid_ = connectedPid;
2125 }
2126
2127 /**
2128 * @brief Optimized variant of the original FlushBuffer that avoids segmented locking by running under the caller-held lock.
2129 */
2130 GSError BufferQueue::FlushBufferImprovedLocked(uint32_t sequence, sptr<BufferExtraData> &bedata,
2131 const sptr<SyncFence> &fence, const BufferFlushConfigWithDamages &config, std::unique_lock<std::mutex> &lock)
2132 {
2133 if (!GetStatusLocked()) {
2134 SURFACE_TRACE_NAME_FMT("status: %d", GetStatusLocked());
2135 BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
2136 }
2137 // check param
2138 auto sret = CheckFlushConfig(config);
2139 if (sret != GSERROR_OK) {
2140 BLOGE("CheckFlushConfig ret: %{public}d, uniqueId: %{public}" PRIu64 ".", sret, uniqueId_);
2141 return sret;
2142 }
2143
2144 sret = CheckBufferQueueCacheLocked(sequence);
2145 if (sret != GSERROR_OK) {
2146 return sret;
2147 }
2148
2149 {
2150 std::lock_guard<std::mutex> lockGuard(listenerMutex_);
2151 if (listener_ == nullptr && listenerClazz_ == nullptr) {
2152 BLOGE("listener is nullptr, uniqueId: %{public}" PRIu64 ".", uniqueId_);
2153 return SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER;
2154 }
2155 }
2156 sret = DoFlushBufferLocked(sequence, bedata, fence, config, lock);
2157 if (sret != GSERROR_OK) {
2158 return sret;
2159 }
2160 return sret;
2161 }
2162
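/**
 * @brief Request a buffer and immediately detach it from the queue within a single
 *        locked operation, combining RequestBufferLocked and DetachBufferFromQueueLocked.
 */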
2163 GSError BufferQueue::RequestAndDetachBuffer(const BufferRequestConfig& config, sptr<BufferExtraData>& bedata,
2164 struct IBufferProducer::RequestBufferReturnValue& retval)
2165 {
2166 SURFACE_TRACE_NAME_FMT("RequestAndDetachBuffer queueId: %" PRIu64, uniqueId_);
2167 std::unique_lock<std::mutex> lock(mutex_);
2168 auto ret = RequestBufferLocked(config, bedata, retval, lock);
2169 if (ret != GSERROR_OK) {
2170 return ret;
2171 }
2172 return DetachBufferFromQueueLocked(retval.sequence, InvokerType::PRODUCER_INVOKER, lock, false);
2173 }
2174
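/**
 * @brief Attach a producer-owned buffer and flush it in one step; if the flush fails,
 *        the freshly attached cache entry is rolled back before returning.
 */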
2175 GSError BufferQueue::AttachAndFlushBuffer(sptr<SurfaceBuffer>& buffer, sptr<BufferExtraData>& bedata,
2176 const sptr<SyncFence>& fence, BufferFlushConfigWithDamages& config, bool needMap)
2177 {
2178 SURFACE_TRACE_NAME_FMT("AttachAndFlushBuffer queueId: %" PRIu64 " sequence: %u", uniqueId_, buffer->GetSeqNum());
2179 GSError ret;
2180 {
2181 std::unique_lock<std::mutex> lock(mutex_);
2182 ret = AttachBufferToQueueLocked(buffer, InvokerType::PRODUCER_INVOKER, needMap);
2183 if (ret != GSERROR_OK) {
2184 return ret;
2185 }
2186 uint32_t sequence = buffer->GetSeqNum();
2187 ret = FlushBufferImprovedLocked(sequence, bedata, fence, config, lock);
2188 if (ret != GSERROR_OK) {
2189 for (auto it = dirtyList_.begin(); it != dirtyList_.end(); it++) {
2190 if (*it == sequence) {
2191 dirtyList_.erase(it);
2192 break;
2193 }
2194 }
2195 bufferQueueCache_.erase(sequence);
2196 return ret;
2197 }
2198 }
2199 CallConsumerListener();
2200 return ret;
2201 }
2202
2203 GSError BufferQueue::GetLastFlushedDesiredPresentTimeStamp(int64_t &lastFlushedDesiredPresentTimeStamp)
2204 {
2205 std::lock_guard<std::mutex> lockGuard(mutex_);
2206 lastFlushedDesiredPresentTimeStamp = lastFlushedDesiredPresentTimeStamp_;
2207 return GSERROR_OK;
2208 }
2209
2210 GSError BufferQueue::GetBufferSupportFastCompose(bool &bufferSupportFastCompose)
2211 {
2212 std::lock_guard<std::mutex> lockGuard(mutex_);
2213 bufferSupportFastCompose = bufferSupportFastCompose_;
2214 return GSERROR_OK;
2215 }
2216
2217 GSError BufferQueue::GetBufferCacheConfig(const sptr<SurfaceBuffer>& buffer, BufferRequestConfig& config)
2218 {
2219 std::lock_guard<std::mutex> lockGuard(mutex_);
2220 auto iter = bufferQueueCache_.find(buffer->GetSeqNum());
2221 if (iter == bufferQueueCache_.end()) {
2222 return GSERROR_BUFFER_NOT_INCACHE;
2223 }
2224 config = iter->second.config;
2225 return GSERROR_OK;
2226 }
2227
2228 GSError BufferQueue::GetCycleBuffersNumber(uint32_t& cycleBuffersNumber)
2229 {
2230 std::lock_guard<std::mutex> lockGuard(mutex_);
2231 return BufferUtilGetCycleBuffersNumber(cycleBuffersNumber, rotatingBufferNumber_, bufferQueueSize_);
2232 }
2233
2234 GSError BufferQueue::SetCycleBuffersNumber(uint32_t cycleBuffersNumber)
2235 {
2236 // 2: the upper limit is two times the max queue size
2237 if (cycleBuffersNumber == 0 || cycleBuffersNumber > SURFACE_MAX_QUEUE_SIZE * 2) {
2238 BLOGE("Set rotating buffers number : %{public}u failed", cycleBuffersNumber);
2239 return GSERROR_INVALID_ARGUMENTS;
2240 }
2241 std::lock_guard<std::mutex> lockGuard(mutex_);
2242 rotatingBufferNumber_ = cycleBuffersNumber;
2243 return GSERROR_OK;
2244 }
2245
2246 GSError BufferQueue::GetLastConsumeTime(int64_t &lastConsumeTime)
2247 {
2248 std::lock_guard<std::mutex> lockGuard(mutex_);
2249 lastConsumeTime = lastConsumeTime_;
2250 return GSERROR_OK;
2251 }
2252 }; // namespace OHOS
2253