/*
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "buffer_queue.h"
#include <algorithm>
#include <chrono>
#include <fstream>
#include <linux/dma-buf.h>
#include <sstream>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <cinttypes>
#include <unistd.h>
#include <parameters.h>

#include "acquire_fence_manager.h"
#include "buffer_utils.h"
#include "buffer_log.h"
#include "hebc_white_list.h"
#include "hitrace_meter.h"
#include "metadata_helper.h"
#include "sandbox_utils.h"
#include "surface_buffer_impl.h"
#include "sync_fence.h"
#include "sync_fence_tracker.h"
#include "surface_utils.h"
#include "surface_trace.h"
#include "v2_0/buffer_handle_meta_key_type.h"

#define DMA_BUF_SET_TYPE _IOW(DMA_BUF_BASE, 2, const char *)

namespace OHOS {
namespace {
constexpr int32_t FORCE_GLOBAL_ALPHA_MIN = -1;
constexpr int32_t FORCE_GLOBAL_ALPHA_MAX = 255;
constexpr uint32_t UNIQUE_ID_OFFSET = 32;
constexpr uint32_t BUFFER_MEMSIZE_RATE = 1024;
constexpr uint32_t BUFFER_MEMSIZE_FORMAT = 2;
constexpr uint32_t MAXIMUM_LENGTH_OF_APP_FRAMEWORK = 64;
constexpr uint32_t INVALID_SEQUENCE = 0xFFFFFFFF;
constexpr uint32_t ONE_SECOND_TIMESTAMP = 1e9;
constexpr const char* BUFFER_SUPPORT_FASTCOMPOSE = "SupportFastCompose";
constexpr int32_t MIN_FRAME_GRAVITY = -1;
constexpr int32_t MAX_FRAME_GRAVITY = 15;
constexpr int32_t MIN_FIXED_ROTATION = -1;
constexpr int32_t MAX_FIXED_ROTATION = 1;
constexpr int32_t LPP_SLOT_SIZE = 8;
constexpr int32_t MAX_LPP_SKIP_COUNT = 10;
static const size_t LPP_SHARED_MEM_SIZE = 0x1000;
}

static const std::map<BufferState, std::string> BufferStateStrs = {
    {BUFFER_STATE_RELEASED,                    "0 <released>"},
    {BUFFER_STATE_REQUESTED,                   "1 <requested>"},
    {BUFFER_STATE_FLUSHED,                     "2 <flushed>"},
    {BUFFER_STATE_ACQUIRED,                    "3 <acquired>"},
    {BUFFER_STATE_ATTACHED,                    "4 <attached>"},
};

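// Builds a process-wide unique id: the real pid in the high 32 bits, an atomic counter in the low 32 bits.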
static uint64_t GetUniqueIdImpl()
{
    static std::atomic<uint32_t> counter { 0 };
    static uint64_t id = static_cast<uint64_t>(GetRealPid()) << UNIQUE_ID_OFFSET;
    return id | counter.fetch_add(1, std::memory_order_relaxed);
}

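// Reads /proc/self/cmdline to check whether the current process is the render service.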
static bool IsLocalRender()
{
    std::ifstream procfile("/proc/self/cmdline");
    if (!procfile.is_open()) {
        BLOGE("Error opening procfile!");
        return false;
    }
    std::string processName;
    std::getline(procfile, processName);
    procfile.close();
    std::string target = "/system/bin/render_service";
    bool result = processName.substr(0, target.size()) == target;
    return result;
}

BufferQueue::BufferQueue(const std::string &name)
    : name_(name), uniqueId_(GetUniqueIdImpl()), isLocalRender_(IsLocalRender())
{
    BLOGD("BufferQueue ctor, uniqueId: %{public}" PRIu64 ".", uniqueId_);
    acquireLastFlushedBufSequence_ = INVALID_SEQUENCE;

    if (isLocalRender_) {
        if (!HebcWhiteList::GetInstance().Init()) {
            BLOGW("HebcWhiteList init failed");
        }
    }
}

BufferQueue::~BufferQueue()
{
    BLOGD("~BufferQueue dtor, uniqueId: %{public}" PRIu64 ".", uniqueId_);
    for (auto &[id, _] : bufferQueueCache_) {
        OnBufferDeleteForRS(id);
    }
    SetLppShareFd(-1, false);
}

uint32_t BufferQueue::GetUsedSize()
{
    return static_cast<uint32_t>(bufferQueueCache_.size());
}

GSError BufferQueue::GetProducerInitInfo(ProducerInitInfo &info)
{
    static uint64_t producerId = 1; // producerId starts from 1; 0 is reserved for the consumer
    std::lock_guard<std::mutex> lockGuard(mutex_);
    info.name = name_;
    info.width = defaultWidth_;
    info.height = defaultHeight_;
    info.uniqueId = uniqueId_;
    info.isInHebcList = HebcWhiteList::GetInstance().Check(info.appName);
    info.bufferName = bufferName_;
    info.producerId = producerId++;
    info.transformHint = transformHint_;
    return GSERROR_OK;
}

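// Prefer a free buffer whose config matches the request, skipping the buffer locked by
// AcquireLastFlushedBuffer; otherwise hand back the oldest free buffer so the caller can realloc it.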
GSError BufferQueue::PopFromFreeListLocked(sptr<SurfaceBuffer> &buffer,
    const BufferRequestConfig &config)
{
    for (auto it = freeList_.begin(); it != freeList_.end(); it++) {
        auto mapIter = bufferQueueCache_.find(*it);
        if (mapIter != bufferQueueCache_.end() && mapIter->second.config == config) {
            if (mapIter->first == acquireLastFlushedBufSequence_) {
                continue;
            }
            buffer = mapIter->second.buffer;
            freeList_.erase(it);
            return GSERROR_OK;
        }
    }

    if (freeList_.empty() || GetUsedSize() < bufferQueueSize_ - detachReserveSlotNum_ ||
        (freeList_.size() == 1 && freeList_.front() == acquireLastFlushedBufSequence_)) {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }

    if (freeList_.front() == acquireLastFlushedBufSequence_) {
        freeList_.pop_front();
        freeList_.push_back(acquireLastFlushedBufSequence_);
    }

    buffer = bufferQueueCache_[freeList_.front()].buffer;
    buffer->SetSurfaceBufferColorGamut(config.colorGamut);
    buffer->SetSurfaceBufferTransform(config.transform);
    freeList_.pop_front();
    return GSERROR_OK;
}

GSError BufferQueue::PopFromDirtyListLocked(sptr<SurfaceBuffer> &buffer)
{
    if (!dirtyList_.empty()) {
        buffer = bufferQueueCache_[dirtyList_.front()].buffer;
        dirtyList_.pop_front();
        return GSERROR_OK;
    } else {
        buffer = nullptr;
        return GSERROR_NO_BUFFER;
    }
}

GSError BufferQueue::CheckRequestConfig(const BufferRequestConfig &config)
{
    if (config.colorGamut <= GraphicColorGamut::GRAPHIC_COLOR_GAMUT_INVALID ||
        config.colorGamut > GraphicColorGamut::GRAPHIC_COLOR_GAMUT_DISPLAY_BT2020 + 1) {
        BLOGW("colorGamut is %{public}d, uniqueId: %{public}" PRIu64 ".",
            static_cast<uint32_t>(config.colorGamut), uniqueId_);
        return GSERROR_INVALID_ARGUMENTS;
    }

    if (config.transform < GraphicTransformType::GRAPHIC_ROTATE_NONE ||
        config.transform >= GraphicTransformType::GRAPHIC_ROTATE_BUTT) {
        BLOGW("transform is %{public}d, uniqueId: %{public}" PRIu64 ".", config.transform, uniqueId_);
        return GSERROR_INVALID_ARGUMENTS;
    }
    return GSERROR_OK;
}

GSError BufferQueue::CheckFlushConfig(const BufferFlushConfigWithDamages &config)
{
    for (decltype(config.damages.size()) i = 0; i < config.damages.size(); i++) {
        if (config.damages[i].w < 0 || config.damages[i].h < 0) {
            BLOGW("damages[%{public}zu].w is %{public}d, .h is %{public}d, uniqueId: %{public}" PRIu64 ".",
                i, config.damages[i].w, config.damages[i].h, uniqueId_);
            return GSERROR_INVALID_ARGUMENTS;
        }
    }
    return GSERROR_OK;
}

bool BufferQueue::QueryIfBufferAvailable()
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    bool ret = !freeList_.empty() || (GetUsedSize() < bufferQueueSize_);
    return ret;
}

static GSError DelegatorDequeueBuffer(wptr<ConsumerSurfaceDelegator>& delegator,
                                      const BufferRequestConfig& config,
                                      sptr<BufferExtraData>& bedata,
                                      struct IBufferProducer::RequestBufferReturnValue& retval)
{
    auto consumerDelegator = delegator.promote();
    if (consumerDelegator == nullptr) {
        BLOGE("Consumer surface delegator has expired");
        return GSERROR_INVALID_ARGUMENTS;
    }
    auto ret = consumerDelegator->DequeueBuffer(config, bedata, retval);
    if (ret != GSERROR_OK) {
        BLOGE("Consumer surface delegator failed to dequeue buffer, err: %{public}d", ret);
        return ret;
    }

    ret = retval.buffer->Map();
    if (ret != GSERROR_OK) {
        BLOGE("Buffer map failed, err: %{public}d", ret);
        return ret;
    }
    retval.buffer->SetSurfaceBufferWidth(retval.buffer->GetWidth());
    retval.buffer->SetSurfaceBufferHeight(retval.buffer->GetHeight());

    return GSERROR_OK;
}

static void SetReturnValue(sptr<SurfaceBuffer>& buffer, sptr<BufferExtraData>& bedata,
                           struct IBufferProducer::RequestBufferReturnValue& retval)
{
    retval.sequence = buffer->GetSeqNum();
    bedata = buffer->GetExtraData();
    retval.fence = SyncFence::InvalidFence();
}

void BufferQueue::SetSurfaceBufferHebcMetaLocked(sptr<SurfaceBuffer> buffer)
{
    using namespace HDI::Display::Graphic::Common;
    // usage does not contain BUFFER_USAGE_CPU_HW_BOTH, just return
    if (!(buffer->GetUsage() & BUFFER_USAGE_CPU_HW_BOTH)) {
        return;
    }

    V2_0::BufferHandleAttrKey key = V2_0::BufferHandleAttrKey::ATTRKEY_REQUEST_ACCESS_TYPE;
    std::vector<uint8_t> values;
    if (isCpuAccessable_) { // hebc is off
        values.emplace_back(static_cast<uint8_t>(V2_0::HebcAccessType::HEBC_ACCESS_CPU_ACCESS));
    } else { // hebc is on
        values.emplace_back(static_cast<uint8_t>(V2_0::HebcAccessType::HEBC_ACCESS_HW_ONLY));
    }

    buffer->SetMetadata(key, values);
}

void BufferQueue::SetBatchHandle(bool batch)
{
    std::unique_lock<std::mutex> lock(mutex_);
    isBatch_ = batch;
}

GSError BufferQueue::RequestBufferCheckStatus()
{
    if (isBatch_) {
        return GSERROR_OK;
    }
    if (!GetStatusLocked()) {
        SURFACE_TRACE_NAME_FMT("RequestBufferCheckStatus status wrong,"
            "surface name: %s queueId: %" PRIu64 " status: %u", name_.c_str(), uniqueId_, GetStatusLocked());
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    std::lock_guard<std::mutex> lockGuard(listenerMutex_);
    if (listener_ == nullptr && listenerClazz_ == nullptr) {
        SURFACE_TRACE_NAME_FMT("RequestBufferCheckStatus no listener, surface name: %s queueId: %" PRIu64,
            name_.c_str(), uniqueId_);
        BLOGN_FAILURE_RET(SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER);
    }

    return GSERROR_OK;
}

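// Wake-up predicate for blocking requests: a usable free buffer exists (not only the locked
// last-flushed one), a new slot can still be allocated, or the queue has gone inactive.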
bool BufferQueue::WaitForCondition()
{
    return (!freeList_.empty() && !(freeList_.size() == 1 && freeList_.front() == acquireLastFlushedBufSequence_)) ||
        (GetUsedSize() < bufferQueueSize_ - detachReserveSlotNum_) || !GetStatusLocked();
}

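// Traces per-buffer states and logs a per-state summary when no slot is available; the log is
// emitted only when the summary changes, to avoid flooding.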
void BufferQueue::RequestBufferDebugInfoLocked()
{
    SURFACE_TRACE_NAME_FMT("lockLastFlushedBuffer seq: %u, reserveSlotNum: %u",
        acquireLastFlushedBufSequence_, detachReserveSlotNum_);
    std::map<BufferState, int32_t> bufferState;
    for (auto &[id, ele] : bufferQueueCache_) {
        SURFACE_TRACE_NAME_FMT("request buffer id: %u state: %u", id, ele.state);
        bufferState[ele.state] += 1;
    }
    std::string str = std::to_string(uniqueId_) +
        ", Released: " + std::to_string(bufferState[BUFFER_STATE_RELEASED]) +
        " Requested: " + std::to_string(bufferState[BUFFER_STATE_REQUESTED]) +
        " Flushed: " + std::to_string(bufferState[BUFFER_STATE_FLUSHED]) +
        " Acquired: " + std::to_string(bufferState[BUFFER_STATE_ACQUIRED]);
    if (str.compare(requestBufferStateStr_) != 0) {
        requestBufferStateStr_ = str;
        BLOGE("all buffers are in use, uniqueId: %{public}s", str.c_str());
    }
}

GSError BufferQueue::SetupNewBufferLocked(sptr<SurfaceBuffer> &buffer, sptr<BufferExtraData> &bedata,
    BufferRequestConfig &updateConfig, const BufferRequestConfig &config,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    GSError ret = AllocBuffer(buffer, nullptr, updateConfig, lock);
    if (ret == GSERROR_OK) {
        AddDeletingBuffersLocked(retval.deletingBuffers);
        SetSurfaceBufferHebcMetaLocked(buffer);
        SetSurfaceBufferGlobalAlphaUnlocked(buffer);
        SetReturnValue(buffer, bedata, retval);
    } else {
        BLOGE("Fail to alloc or map Buffer[%{public}d %{public}d] ret: %{public}d, uniqueId: %{public}" PRIu64,
            config.width, config.height, ret, uniqueId_);
    }
    return ret;
}

GSError BufferQueue::ReuseBufferForBlockMode(sptr<SurfaceBuffer> &buffer, sptr<BufferExtraData> &bedata,
    BufferRequestConfig &updateConfig, const BufferRequestConfig &config,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    waitReqCon_.wait_for(lock, std::chrono::milliseconds(config.timeout),
        [this]() { return WaitForCondition(); });
    if (!GetStatusLocked() && !isBatch_) {
        SURFACE_TRACE_NAME_FMT("Status wrong, status: %d", GetStatusLocked());
        BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
    }
    // try dequeue from free list again
    GSError ret = PopFromFreeListLocked(buffer, updateConfig);
    if (ret == GSERROR_OK) {
        return ReuseBuffer(updateConfig, bedata, retval, lock);
    } else if (GetUsedSize() >= bufferQueueSize_ - detachReserveSlotNum_) {
        RequestBufferDebugInfoLocked();
        return GSERROR_NO_BUFFER;
    }
    return ret;
}

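// Non-blocking path when the queue is full: reuse the oldest dirty (flushed but not yet acquired)
// buffer instead of waiting for a release.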
GSError BufferQueue::ReuseBufferForNoBlockMode(sptr<SurfaceBuffer> &buffer, sptr<BufferExtraData> &bedata,
    BufferRequestConfig &updateConfig, const BufferRequestConfig &config,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    GSError ret = PopFromDirtyListLocked(buffer);
    if (ret == GSERROR_OK) {
        buffer->SetSurfaceBufferColorGamut(config.colorGamut);
        buffer->SetSurfaceBufferTransform(config.transform);
        return ReuseBuffer(updateConfig, bedata, retval, lock);
    } else if (ret == GSERROR_NO_BUFFER) {
        LogAndTraceAllBufferInBufferQueueCacheLocked();
        return GSERROR_NO_BUFFER;
    }
    return ret;
}

GSError BufferQueue::RequestBufferLocked(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    GSError ret = RequestBufferCheckStatus();
    if (ret != GSERROR_OK) {
        return ret;
    }

    // check param
    BufferRequestConfig updateConfig = config;
    updateConfig.usage |= defaultUsage_;
    ret = CheckRequestConfig(updateConfig);
    if (ret != GSERROR_OK) {
        BLOGE("CheckRequestConfig ret: %{public}d, uniqueId: %{public}" PRIu64 ".", ret, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
    SURFACE_TRACE_NAME_FMT("RequestBuffer name: %s queueId: %" PRIu64 " queueSize: %u reserveSlotNum: %u",
        name_.c_str(), uniqueId_, bufferQueueSize_, detachReserveSlotNum_);
    // dequeue from free list
    sptr<SurfaceBuffer>& buffer = retval.buffer;
    if (!(isPriorityAlloc_ && (GetUsedSize() < bufferQueueSize_ - detachReserveSlotNum_))) {
        ret = PopFromFreeListLocked(buffer, updateConfig);
        if (ret == GSERROR_OK) {
            return ReuseBuffer(updateConfig, bedata, retval, lock);
        }

        // check queue size
        if (GetUsedSize() >= bufferQueueSize_ - detachReserveSlotNum_) {
            if (!requestBufferNoBlockMode_) {
                return ReuseBufferForBlockMode(buffer, bedata, updateConfig, config, retval, lock);
            } else {
                return ReuseBufferForNoBlockMode(buffer, bedata, updateConfig, config, retval, lock);
            }
        }
    }

    return SetupNewBufferLocked(buffer, bedata, updateConfig, config, retval, lock);
}

GSError BufferQueue::RequestBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval)
{
    if (wpCSurfaceDelegator_ != nullptr) {
        return DelegatorDequeueBuffer(wpCSurfaceDelegator_, config, bedata, retval);
    }
    int64_t startTimeNs = 0;
    int64_t endTimeNs = 0;
    if (isActiveGame_) {
        startTimeNs = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
    }
    std::unique_lock<std::mutex> lock(mutex_);
    auto ret = RequestBufferLocked(config, bedata, retval, lock);
    if (ret == GSERROR_OK && isActiveGame_) {
        endTimeNs = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
        bufferQueueCache_[retval.sequence].requestTimeNs = endTimeNs - startTimeNs;
    }
    return ret;
}

GSError BufferQueue::SetProducerCacheCleanFlag(bool flag)
{
    std::unique_lock<std::mutex> lock(mutex_);
    return SetProducerCacheCleanFlagLocked(flag, lock);
}

GSError BufferQueue::SetProducerCacheCleanFlagLocked(bool flag, std::unique_lock<std::mutex> &lock)
{
    isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
    producerCacheClean_ = flag;
    producerCacheList_.clear();
    return GSERROR_OK;
}

bool BufferQueue::CheckProducerCacheListLocked()
{
    for (auto &[id, _] : bufferQueueCache_) {
        if (std::find(producerCacheList_.begin(), producerCacheList_.end(), id) == producerCacheList_.end()) {
            return false;
        }
    }
    return true;
}

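// Replaces the cached buffer with a newly allocated one for the new config. When the old slot is
// flagged for realloc, wait (up to 3000 ms) on its release fence so its memory can be passed to
// Alloc as the previous buffer.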
GSError BufferQueue::ReallocBufferLocked(const BufferRequestConfig &config,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    bool isBufferNeedRealloc = false;
    auto mapIter = bufferQueueCache_.find(retval.sequence);
    if (mapIter != bufferQueueCache_.end()) {
        isBufferNeedRealloc = mapIter->second.isBufferNeedRealloc;
        if (isBufferNeedRealloc && mapIter->second.fence != nullptr) {
            // fence wait time 3000ms
            int32_t ret = mapIter->second.fence->Wait(3000);
            if (ret < 0) {
                BLOGE("BufferQueue::ReallocBufferLocked WaitFence timeout 3000ms");
                isBufferNeedRealloc = false;
            }
        }
    }

    DeleteBufferInCacheNoWaitForAllocatingState(retval.sequence);

    sptr<SurfaceBuffer> buffer = nullptr;
    GSError sret = GSERROR_OK;
    if (isBufferNeedRealloc) {
        sret = AllocBuffer(buffer, retval.buffer, config, lock);
    } else {
        sret = AllocBuffer(buffer, nullptr, config, lock);
    }

    if (sret != GSERROR_OK) {
        BLOGE("AllocBuffer failed: %{public}d, uniqueId: %{public}" PRIu64 ".", sret, uniqueId_);
        return sret;
    }

    retval.buffer = buffer;
    retval.sequence = buffer->GetSeqNum();
    bufferQueueCache_[retval.sequence].config = config;
    return GSERROR_OK;
}

void BufferQueue::AddDeletingBuffersLocked(std::vector<uint32_t> &deletingBuffers)
{
    deletingBuffers.reserve(deletingBuffers.size() + deletingList_.size());
    deletingBuffers.insert(deletingBuffers.end(), deletingList_.begin(), deletingList_.end());
    deletingList_.clear();
}

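// Reuses a cached buffer for a new request: realloc if the config changed, refresh HEBC/alpha
// metadata, and only return the buffer body to the producer when it must re-receive it (realloc,
// producer cache clean, consumer attach, or pre-allocated buffer); otherwise retval.buffer is
// cleared because the producer already caches it.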
GSError BufferQueue::ReuseBuffer(const BufferRequestConfig &config, sptr<BufferExtraData> &bedata,
    struct IBufferProducer::RequestBufferReturnValue &retval, std::unique_lock<std::mutex> &lock)
{
    if (retval.buffer == nullptr) {
        BLOGE("input buffer is null, uniqueId: %{public}" PRIu64 ".", uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    retval.sequence = retval.buffer->GetSeqNum();
    auto mapIter = bufferQueueCache_.find(retval.sequence);
    if (mapIter == bufferQueueCache_.end()) {
        BLOGE("cache not find the buffer(%{public}u), uniqueId: %{public}" PRIu64 ".", retval.sequence, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    auto &cacheConfig = mapIter->second.config;
    SURFACE_TRACE_NAME_FMT("ReuseBuffer config width: %d height: %d usage: %llu format: %d id: %u",
        cacheConfig.width, cacheConfig.height, cacheConfig.usage, cacheConfig.format, retval.sequence);

    bool needRealloc = (config != mapIter->second.config);
    // config changed, realloc the buffer
    if (needRealloc) {
        auto sret = ReallocBufferLocked(config, retval, lock);
        if (sret != GSERROR_OK) {
            return sret;
        }
    }

    bufferQueueCache_[retval.sequence].state = BUFFER_STATE_REQUESTED;
    retval.fence = bufferQueueCache_[retval.sequence].fence;
    bedata = retval.buffer->GetExtraData();
    SetSurfaceBufferHebcMetaLocked(retval.buffer);
    SetSurfaceBufferGlobalAlphaUnlocked(retval.buffer);

    auto &dbs = retval.deletingBuffers;
    AddDeletingBuffersLocked(dbs);

    if (needRealloc || producerCacheClean_ || retval.buffer->GetConsumerAttachBufferFlag() ||
        bufferQueueCache_[retval.sequence].isPreAllocBuffer) {
        if (producerCacheClean_) {
            producerCacheList_.push_back(retval.sequence);
            if (CheckProducerCacheListLocked()) {
                SetProducerCacheCleanFlagLocked(false, lock);
            }
        }
        retval.buffer->SetConsumerAttachBufferFlag(false);
        bufferQueueCache_[retval.sequence].isPreAllocBuffer = false;
    } else {
        retval.buffer = nullptr;
    }

    SURFACE_TRACE_NAME_FMT("%s:%u", name_.c_str(), retval.sequence);
    return GSERROR_OK;
}

GSError BufferQueue::CancelBufferLocked(uint32_t sequence, sptr<BufferExtraData> bedata)
{
    auto mapIter = bufferQueueCache_.find(sequence);
    if (mapIter == bufferQueueCache_.end()) {
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }

    if (mapIter->second.state != BUFFER_STATE_REQUESTED && mapIter->second.state != BUFFER_STATE_ATTACHED) {
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    mapIter->second.state = BUFFER_STATE_RELEASED;
    freeList_.push_back(sequence);
    if (mapIter->second.buffer == nullptr) {
        BLOGE("cache buffer is nullptr, sequence:%{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    mapIter->second.buffer->SetExtraData(bedata);

    waitReqCon_.notify_all();
    waitAttachCon_.notify_all();

    return GSERROR_OK;
}

GSError BufferQueue::CancelBuffer(uint32_t sequence, sptr<BufferExtraData> bedata)
{
    SURFACE_TRACE_NAME_FMT("CancelBuffer name: %s queueId: %" PRIu64 " sequence: %u",
        name_.c_str(), uniqueId_, sequence);
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return CancelBufferLocked(sequence, bedata);
}

GSError BufferQueue::CheckBufferQueueCacheLocked(uint32_t sequence)
{
    auto mapIter = bufferQueueCache_.find(sequence);
    if (mapIter == bufferQueueCache_.end()) {
        BLOGE("no find seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }

    auto &state = mapIter->second.state;
    if (state != BUFFER_STATE_REQUESTED && state != BUFFER_STATE_ATTACHED) {
        BLOGE("seq: %{public}u, invalid state %{public}d, uniqueId: %{public}" PRIu64 ".",
            sequence, state, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    return GSERROR_OK;
}

GSError BufferQueue::CheckBufferQueueCache(uint32_t sequence)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return CheckBufferQueueCacheLocked(sequence);
}

GSError BufferQueue::DelegatorQueueBuffer(uint32_t sequence, sptr<SyncFence> fence)
{
    auto consumerDelegator = wpCSurfaceDelegator_.promote();
    if (consumerDelegator == nullptr) {
        BLOGE("Consumer surface delegator has expired");
        return GSERROR_INVALID_ARGUMENTS;
    }
    sptr<SurfaceBuffer> buffer = nullptr;
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        auto mapIter = bufferQueueCache_.find(sequence);
        if (mapIter == bufferQueueCache_.end()) {
            return GSERROR_NO_ENTRY;
        }
        mapIter->second.state = BUFFER_STATE_ACQUIRED;
        buffer = mapIter->second.buffer;
    }
    GSError ret = consumerDelegator->QueueBuffer(buffer, fence->Get());
    if (ret != GSERROR_OK) {
        BLOGE("Consumer surface delegator failed to queue buffer");
    }
    ret = ReleaseBuffer(buffer, SyncFence::InvalidFence());
    if (ret != GSERROR_OK) {
        BLOGE("Consumer surface delegator failed to release buffer");
    }
    return ret;
}

void BufferQueue::CallConsumerListener()
{
    SURFACE_TRACE_NAME_FMT("CallConsumerListener");
    sptr<IBufferConsumerListener> listener;
    IBufferConsumerListenerClazz *listenerClazz;
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        listener = listener_;
        listenerClazz = listenerClazz_;
    }
    if (listener != nullptr) {
        listener->OnBufferAvailable();
    } else if (listenerClazz != nullptr) {
        listenerClazz->OnBufferAvailable();
    }
}

GSError BufferQueue::FlushBuffer(uint32_t sequence, sptr<BufferExtraData> bedata,
    sptr<SyncFence> fence, const BufferFlushConfigWithDamages &config)
{
    SURFACE_TRACE_NAME_FMT("FlushBuffer name: %s queueId: %" PRIu64 " sequence: %u",
        name_.c_str(), uniqueId_, sequence);
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (!GetStatusLocked()) {
            SURFACE_TRACE_NAME_FMT("status: %d", GetStatusLocked());
            BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
        }
    }
    // check param
    auto sret = CheckFlushConfig(config);
    if (sret != GSERROR_OK) {
        BLOGE("CheckFlushConfig ret: %{public}d, uniqueId: %{public}" PRIu64 ".", sret, uniqueId_);
        return sret;
    }

    sret = CheckBufferQueueCache(sequence);
    if (sret != GSERROR_OK) {
        return sret;
    }

    bool listenerNullCheck = false;
    {
        std::lock_guard<std::mutex> lockGuard(listenerMutex_);
        if (listener_ == nullptr && listenerClazz_ == nullptr) {
            listenerNullCheck = true;
        }
    }
    if (listenerNullCheck) {
        SURFACE_TRACE_NAME("listener is nullptr");
        BLOGE("listener is nullptr, uniqueId: %{public}" PRIu64 ".", uniqueId_);
        CancelBuffer(sequence, bedata);
        return SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER;
    }
    sret = DoFlushBuffer(sequence, bedata, fence, config);
    if (sret != GSERROR_OK) {
        return sret;
    }
    CallConsumerListener();

    if (wpCSurfaceDelegator_ != nullptr) {
        sret = DelegatorQueueBuffer(sequence, fence);
    }
    return sret;
}

GSError BufferQueue::GetLastFlushedBuffer(sptr<SurfaceBuffer>& buffer,
    sptr<SyncFence>& fence, float matrix[16], uint32_t matrixSize, bool isUseNewMatrix, bool needRecordSequence)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (needRecordSequence && acquireLastFlushedBufSequence_ != INVALID_SEQUENCE) {
        BLOGE("last flushed buffer(%{public}d) is in use, uniqueId: %{public}" PRIu64 ".",
            acquireLastFlushedBufSequence_, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    auto mapIter = bufferQueueCache_.find(lastFlusedSequence_);
    if (mapIter == bufferQueueCache_.end()) {
        BLOGE("cache not find the buffer(%{public}u), uniqueId: %{public}" PRIu64 ".", lastFlusedSequence_, uniqueId_);
        return SURFACE_ERROR_UNKOWN;
    }
    auto &state = mapIter->second.state;
    if (state == BUFFER_STATE_REQUESTED) {
        BLOGE("seq: %{public}u, invalid state %{public}d, uniqueId: %{public}" PRIu64 ".",
            lastFlusedSequence_, state, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    buffer = mapIter->second.buffer;
    auto usage = buffer->GetUsage();
    if (usage & BUFFER_USAGE_PROTECTED) {
        BLOGE("lastFlusedSeq: %{public}u, usage: %{public}" PRIu64 ", uniqueId: %{public}" PRIu64 ".",
            lastFlusedSequence_, usage, uniqueId_);
        return SURFACE_ERROR_NOT_SUPPORT;
    }

    fence = lastFlusedFence_;
    Rect damage = {};
    damage.w = buffer->GetWidth();
    damage.h = buffer->GetHeight();

    auto utils = SurfaceUtils::GetInstance();
    if (isUseNewMatrix) {
        utils->ComputeTransformMatrixV2(matrix, matrixSize, buffer, lastFlushedTransform_, damage);
    } else {
        utils->ComputeTransformMatrix(matrix, matrixSize, buffer, lastFlushedTransform_, damage);
    }

    if (needRecordSequence) {
        acquireLastFlushedBufSequence_ = lastFlusedSequence_;
        SURFACE_TRACE_NAME_FMT("GetLastFlushedBuffer(needRecordSequence) name: %s queueId: %" PRIu64 " seq: %u",
            name_.c_str(), uniqueId_, acquireLastFlushedBufSequence_);
    }
    return GSERROR_OK;
}

GSError BufferQueue::AcquireLastFlushedBuffer(sptr<SurfaceBuffer> &buffer, sptr<SyncFence> &fence,
    float matrix[16], uint32_t matrixSize, bool isUseNewMatrix)
{
    return GetLastFlushedBuffer(buffer, fence, matrix, matrixSize, isUseNewMatrix, true);
}

GSError BufferQueue::ReleaseLastFlushedBuffer(uint32_t sequence)
{
    SURFACE_TRACE_NAME_FMT("ReleaseLastFlushedBuffer name: %s queueId: %" PRIu64 " seq: %u",
        name_.c_str(), uniqueId_, sequence);
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (acquireLastFlushedBufSequence_ == INVALID_SEQUENCE || acquireLastFlushedBufSequence_ != sequence) {
        BLOGE("ReleaseLastFlushedBuffer lastFlushBuffer:%{public}d sequence:%{public}d, uniqueId: %{public}" PRIu64,
            acquireLastFlushedBufSequence_, sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    acquireLastFlushedBufSequence_ = INVALID_SEQUENCE;
    waitReqCon_.notify_all();
    return GSERROR_OK;
}

GSError BufferQueue::DoFlushBufferLocked(uint32_t sequence, sptr<BufferExtraData> bedata,
    sptr<SyncFence> fence, const BufferFlushConfigWithDamages &config, std::unique_lock<std::mutex> &lock)
{
    auto mapIter = bufferQueueCache_.find(sequence);
    if (mapIter == bufferQueueCache_.end()) {
        BLOGE("bufferQueueCache not find sequence:%{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        return SURFACE_ERROR_BUFFER_NOT_INCACHE;
    }
    if (mapIter->second.isDeleting) {
        DeleteBufferInCache(sequence, lock);
        BLOGD("DoFlushBuffer delete seq: %{public}d, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
        CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
        return GSERROR_OK;
    }

    mapIter->second.buffer->SetExtraData(bedata);
    int32_t supportFastCompose = 0;
    mapIter->second.buffer->GetExtraData()->ExtraGet(
        BUFFER_SUPPORT_FASTCOMPOSE, supportFastCompose);
    mapIter->second.buffer->SetSurfaceBufferTransform(transform_);

    uint64_t usage = mapIter->second.config.usage;
    if (usage & BUFFER_USAGE_CPU_WRITE) {
        // api flush
        auto sret = mapIter->second.buffer->FlushCache();
        if (sret != GSERROR_OK) {
            BLOGE("FlushCache ret: %{public}d, seq: %{public}u, uniqueId: %{public}" PRIu64 ".",
                sret, sequence, uniqueId_);
            return sret;
        }
    }
    // if failed, avoid state rollback
    mapIter->second.state = BUFFER_STATE_FLUSHED;
    mapIter->second.fence = fence;
    mapIter->second.damages = config.damages;
    dirtyList_.push_back(sequence);
    lastFlusedSequence_ = sequence;
    lastFlusedFence_ = fence;
    lastFlushedTransform_ = transform_;
    bufferSupportFastCompose_ = (bool)supportFastCompose;

    SetDesiredPresentTimestampAndUiTimestamp(sequence, config.desiredPresentTimestamp, config.timestamp);
    lastFlushedDesiredPresentTimeStamp_ = mapIter->second.desiredPresentTimestamp;
    // if you need to dump the SurfaceBuffer to a file, execute "hdc shell param set persist.dumpbuffer.enabled 1"
    // and reboot your device
    static bool dumpBufferEnabled = system::GetParameter("persist.dumpbuffer.enabled", "0") != "0";
    if (dumpBufferEnabled) {
        // Wait for the status of the fence to change to SIGNALED.
        fence->Wait(-1);
        DumpToFileAsync(GetRealPid(), name_, mapIter->second.buffer);
    }

    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    return GSERROR_OK;
}

GSError BufferQueue::DoFlushBuffer(uint32_t sequence, sptr<BufferExtraData> bedata,
    sptr<SyncFence> fence, const BufferFlushConfigWithDamages &config)
{
    SURFACE_TRACE_NAME_FMT("DoFlushBuffer name: %s queueId: %" PRIu64 " seq: %u",
        name_.c_str(), uniqueId_, sequence);
    std::unique_lock<std::mutex> lock(mutex_);
    int64_t startTimeNs = 0;
    int64_t endTimeNs = 0;
    if (isActiveGame_) {
        startTimeNs = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
    }
    auto ret = DoFlushBufferLocked(sequence, bedata, fence, config, lock);
    if (ret == GSERROR_OK && isActiveGame_) {
        endTimeNs = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
        bufferQueueCache_[sequence].flushTimeNs = endTimeNs - startTimeNs;
    }
    return ret;
}

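// A positive desiredPresentTimestamp is used as-is; zero falls back to the ui timestamp when it is
// valid; otherwise the current steady-clock time is used and the entry is marked auto-timestamped.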
void BufferQueue::SetDesiredPresentTimestampAndUiTimestamp(uint32_t sequence, int64_t desiredPresentTimestamp,
                                                           uint64_t uiTimestamp)
{
    auto mapIter = bufferQueueCache_.find(sequence);
    mapIter->second.isAutoTimestamp = false;
    if (desiredPresentTimestamp <= 0) {
        if (desiredPresentTimestamp == 0 && uiTimestamp != 0
            && uiTimestamp <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
            mapIter->second.desiredPresentTimestamp = static_cast<int64_t>(uiTimestamp);
        } else {
            mapIter->second.desiredPresentTimestamp = std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
            mapIter->second.isAutoTimestamp = true;
        }
    } else {
        mapIter->second.desiredPresentTimestamp = desiredPresentTimestamp;
    }
    mapIter->second.timestamp = static_cast<int64_t>(uiTimestamp);
}

void BufferQueue::LogAndTraceAllBufferInBufferQueueCacheLocked()
{
    std::map<BufferState, int32_t> bufferState;
    for (auto &[id, ele] : bufferQueueCache_) {
        SURFACE_TRACE_NAME_FMT("acquire buffer id: %d state: %d desiredPresentTimestamp: %" PRId64
            " isAutoTimestamp: %d", id, ele.state, ele.desiredPresentTimestamp, ele.isAutoTimestamp);
        bufferState[ele.state] += 1;
    }
    std::string str = std::to_string(uniqueId_) +
        ", Released: " + std::to_string(bufferState[BUFFER_STATE_RELEASED]) +
        " Requested: " + std::to_string(bufferState[BUFFER_STATE_REQUESTED]) +
        " Flushed: " + std::to_string(bufferState[BUFFER_STATE_FLUSHED]) +
        " Acquired: " + std::to_string(bufferState[BUFFER_STATE_ACQUIRED]);
    if (str.compare(acquireBufferStateStr_) != 0) {
        acquireBufferStateStr_ = str;
        BLOGE("there is no dirty buffer, or no dirty buffer is ready, uniqueId: %{public}s", str.c_str());
    }
}

GSError BufferQueue::AcquireBuffer(sptr<SurfaceBuffer> &buffer,
    sptr<SyncFence> &fence, int64_t &timestamp, std::vector<Rect> &damages)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return AcquireBufferLocked(buffer, fence, timestamp, damages);
}

GSError BufferQueue::AcquireBufferLocked(sptr<SurfaceBuffer> &buffer,
    sptr<SyncFence> &fence, int64_t &timestamp, std::vector<Rect> &damages)
{
    SURFACE_TRACE_NAME_FMT("AcquireBuffer name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
    // dequeue from dirty list
    GSError ret = PopFromDirtyListLocked(buffer);
    if (ret == GSERROR_OK) {
        uint32_t sequence = buffer->GetSeqNum();
        auto mapIter = bufferQueueCache_.find(sequence);
        mapIter->second.state = BUFFER_STATE_ACQUIRED;
        mapIter->second.lastAcquireTime = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();

        fence = mapIter->second.fence;
        timestamp = mapIter->second.timestamp;
        damages = mapIter->second.damages;
        SURFACE_TRACE_NAME_FMT("acquire buffer sequence: %u desiredPresentTimestamp: %" PRId64 " isAutoTimestamp: %d",
            sequence, mapIter->second.desiredPresentTimestamp,
            mapIter->second.isAutoTimestamp);
    } else if (ret == GSERROR_NO_BUFFER) {
        LogAndTraceAllBufferInBufferQueueCacheLocked();
    }

    CountTrace(HITRACE_TAG_GRAPHIC_AGP, name_, static_cast<int32_t>(dirtyList_.size()));
    return ret;
}

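// Acquire with frame-dropping: dirty buffers whose desired present time is already due are dropped
// (released back to the producer) until the newest eligible one remains; returns
// GSERROR_NO_BUFFER_READY when the front buffer is not due yet.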
GSError BufferQueue::AcquireBuffer(IConsumerSurface::AcquireBufferReturnValue &returnValue,
                                   int64_t expectPresentTimestamp, bool isUsingAutoTimestamp)
{
    SURFACE_TRACE_NAME_FMT("AcquireBuffer with PresentTimestamp name: %s queueId: %" PRIu64 " queueSize: %u "
        "expectPresentTimestamp: %" PRId64, name_.c_str(), uniqueId_, bufferQueueSize_, expectPresentTimestamp);
    if (expectPresentTimestamp <= 0) {
        return AcquireBuffer(returnValue.buffer, returnValue.fence, returnValue.timestamp, returnValue.damages);
    }
    std::vector<BufferAndFence> dropBuffers;
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        std::list<uint32_t>::iterator frontSequence = dirtyList_.begin();
        if (frontSequence == dirtyList_.end()) {
            LogAndTraceAllBufferInBufferQueueCacheLocked();
            return GSERROR_NO_BUFFER;
        }
        auto mapIter = bufferQueueCache_.find(*frontSequence);
        int64_t frontDesiredPresentTimestamp = mapIter->second.desiredPresentTimestamp;
        bool frontIsAutoTimestamp = mapIter->second.isAutoTimestamp;
        if (!frontIsAutoTimestamp && frontDesiredPresentTimestamp > expectPresentTimestamp
            && frontDesiredPresentTimestamp - ONE_SECOND_TIMESTAMP <= expectPresentTimestamp) {
            SURFACE_TRACE_NAME_FMT("Acquire no buffer ready");
            LogAndTraceAllBufferInBufferQueueCacheLocked();
            return GSERROR_NO_BUFFER_READY;
        }
        while (!(frontIsAutoTimestamp && !isUsingAutoTimestamp)
            && frontDesiredPresentTimestamp <= expectPresentTimestamp) {
            BufferElement& frontBufferElement = bufferQueueCache_[*frontSequence];
            if (++frontSequence == dirtyList_.end()) {
                BLOGD("Buffer seq(%{public}d) is the last buffer, do acquire.", dirtyList_.front());
                break;
            }
            BufferElement& secondBufferElement = bufferQueueCache_[*frontSequence];
            if ((secondBufferElement.isAutoTimestamp && !isUsingAutoTimestamp)
                || secondBufferElement.desiredPresentTimestamp > expectPresentTimestamp) {
                BLOGD("Next dirty buffer desiredPresentTimestamp: %{public}" PRId64 " not match expectPresentTimestamp"
                    ": %{public}" PRId64 ".", secondBufferElement.desiredPresentTimestamp, expectPresentTimestamp);
                break;
            }
            SURFACE_TRACE_NAME_FMT("DropBuffer name: %s queueId: %" PRIu64 " ,buffer seq: %u , buffer "
                "desiredPresentTimestamp: %" PRId64 " acquire expectPresentTimestamp: %" PRId64, name_.c_str(),
                uniqueId_, frontBufferElement.buffer->GetSeqNum(), frontBufferElement.desiredPresentTimestamp,
                expectPresentTimestamp);
            DropFirstDirtyBuffer(frontBufferElement, secondBufferElement, frontDesiredPresentTimestamp,
                                 frontIsAutoTimestamp, dropBuffers);
        }
        if (!frontIsAutoTimestamp && !IsPresentTimestampReady(frontDesiredPresentTimestamp, expectPresentTimestamp)) {
            SURFACE_TRACE_NAME_FMT("Acquire no buffer ready");
            LogAndTraceAllBufferInBufferQueueCacheLocked();
            return GSERROR_NO_BUFFER_READY;
        }
    }
    ReleaseDropBuffers(dropBuffers);
    return AcquireBuffer(returnValue.buffer, returnValue.fence, returnValue.timestamp, returnValue.damages);
}

GSError BufferQueue::AcquireBuffer(IConsumerSurface::AcquireBufferReturnValue &returnValue)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    GSError ret = AcquireBufferLocked(returnValue.buffer, returnValue.fence,
        returnValue.timestamp, returnValue.damages);
    if (ret != GSERROR_OK) {
        return ret;
    }
    auto bufferElement = bufferQueueCache_[returnValue.buffer->GetSeqNum()];
    returnValue.isAutoTimestamp = bufferElement.isAutoTimestamp;
    returnValue.desiredPresentTimestamp = bufferElement.desiredPresentTimestamp;
    return GSERROR_OK;
}

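// Moves the front dirty buffer into dropBuffers and advances the front timestamp/auto-timestamp
// bookkeeping to the next dirty entry.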
void BufferQueue::DropFirstDirtyBuffer(BufferElement &frontBufferElement, BufferElement &secondBufferElement,
                                       int64_t &frontDesiredPresentTimestamp, bool &frontIsAutoTimestamp,
                                       std::vector<BufferAndFence> &dropBuffers)
{
    dirtyList_.pop_front();
    frontBufferElement.state = BUFFER_STATE_ACQUIRED;
    dropBuffers.emplace_back(frontBufferElement.buffer, frontBufferElement.fence);
    frontDesiredPresentTimestamp = secondBufferElement.desiredPresentTimestamp;
    frontIsAutoTimestamp = secondBufferElement.isAutoTimestamp;
}

void BufferQueue::ReleaseDropBuffers(std::vector<BufferAndFence> &dropBuffers)
{
    for (auto& dropBuffer : dropBuffers) {
        auto ret = ReleaseBuffer(dropBuffer.first, dropBuffer.second);
        if (ret != GSERROR_OK) {
            BLOGE("DropBuffer failed, ret: %{public}d, sequence: %{public}u, uniqueId: %{public}" PRIu64 ".",
                ret, dropBuffer.first->GetSeqNum(), uniqueId_);
        }
    }
}

bool BufferQueue::IsPresentTimestampReady(int64_t desiredPresentTimestamp, int64_t expectPresentTimestamp)
{
    return isBufferUtilPresentTimestampReady(desiredPresentTimestamp, expectPresentTimestamp);
}

void BufferQueue::ListenerBufferReleasedCb(sptr<SurfaceBuffer> &buffer, const sptr<SyncFence> &fence)
{
    {
        std::lock_guard<std::mutex> lockGuard(onBufferReleaseMutex_);
        if (onBufferRelease_ != nullptr) {
            SURFACE_TRACE_NAME_FMT("OnBufferRelease_ sequence: %u", buffer->GetSeqNum());
            sptr<SurfaceBuffer> buf = buffer;
            (void)onBufferRelease_(buf);
        }
    }

    sptr<IProducerListener> listener;
    sptr<IProducerListener> listenerBackup;
    {
        std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
        listener = producerListener_;
        listenerBackup = producerListenerBackup_;
    }

    if (listener != nullptr) {
        SURFACE_TRACE_NAME_FMT("onBufferReleasedForProducer sequence: %u", buffer->GetSeqNum());
        if (listener->OnBufferReleased() != GSERROR_OK) {
            BLOGE("seq: %{public}u, OnBufferReleased failed, uniqueId: %{public}" PRIu64 ".",
                buffer->GetSeqNum(), uniqueId_);
        }
    }

    if (listenerBackup != nullptr) {
        SURFACE_TRACE_NAME_FMT("onBufferReleasedBackupForProducer sequence: %u", buffer->GetSeqNum());
        if (listenerBackup->OnBufferReleasedWithFence(buffer, fence) != GSERROR_OK) {
            BLOGE("seq: %{public}u, OnBufferReleasedWithFence failed, uniqueId: %{public}" PRIu64 ".",
                buffer->GetSeqNum(), uniqueId_);
        }
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    OnBufferDeleteCbForHardwareThreadLocked(buffer);
}

void BufferQueue::OnBufferDeleteCbForHardwareThreadLocked(const sptr<SurfaceBuffer> &buffer) const
{
    if (onBufferDeleteForRSHardwareThread_ != nullptr) {
        onBufferDeleteForRSHardwareThread_(buffer->GetSeqNum());
    }
}

GSError BufferQueue::ReleaseBuffer(uint32_t sequence, const sptr<SyncFence>& fence)
{
    sptr<SurfaceBuffer> buffer = nullptr;
    {
        std::unique_lock<std::mutex> lock(mutex_);
        auto mapIter = bufferQueueCache_.find(sequence);
        if (mapIter == bufferQueueCache_.end()) {
            LogAndTraceAllBufferInBufferQueueCacheLocked();
            return SURFACE_ERROR_BUFFER_NOT_INCACHE;
        }
        BufferElement& bufferElement = mapIter->second;
        buffer = bufferElement.buffer;
        auto ret = ReleaseBufferLocked(bufferElement, fence, lock);
        if (ret != GSERROR_OK) {
            return ret;
        }
    }
    ListenerBufferReleasedCb(buffer, fence);
    return GSERROR_OK;
}

GSError BufferQueue::ReleaseBuffer(sptr<SurfaceBuffer> &buffer, const sptr<SyncFence>& fence)
{
    if (buffer == nullptr) {
        return GSERROR_INVALID_ARGUMENTS;
    }

    uint32_t sequence = buffer->GetSeqNum();
    {
        std::unique_lock<std::mutex> lock(mutex_);
        auto mapIter = bufferQueueCache_.find(sequence);
        if (mapIter == bufferQueueCache_.end()) {
            SURFACE_TRACE_NAME_FMT("buffer not found in cache");
            BLOGE("cache not find the buffer(%{public}u), uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
            OnBufferDeleteCbForHardwareThreadLocked(buffer);
            return SURFACE_ERROR_BUFFER_NOT_INCACHE;
        }
        auto ret = ReleaseBufferLocked(mapIter->second, fence, lock);
        if (ret != GSERROR_OK) {
            return ret;
        }
    }
    ListenerBufferReleasedCb(buffer, fence);
    return GSERROR_OK;
}

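// Merges the consumer release fence with the buffer's own sync fence, records the consume duration,
// then either deletes the entry (if marked deleting) or returns it to the free list and wakes waiters.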
GSError BufferQueue::ReleaseBufferLocked(BufferElement &bufferElement, const sptr<SyncFence> &fence,
    std::unique_lock<std::mutex> &lock)
{
    const auto &state = bufferElement.state;
    if (state != BUFFER_STATE_ACQUIRED && state != BUFFER_STATE_ATTACHED) {
        SURFACE_TRACE_NAME_FMT("invalid state: %u", state);
        BLOGD("invalid state: %{public}d, uniqueId: %{public}" PRIu64 ".", state, uniqueId_);
        return SURFACE_ERROR_BUFFER_STATE_INVALID;
    }
    uint32_t sequence = bufferElement.buffer->GetSeqNum();
    SURFACE_TRACE_NAME_FMT("ReleaseBuffer name: %s queueId: %" PRIu64 " seq: %u", name_.c_str(), uniqueId_, sequence);
    bufferElement.state = BUFFER_STATE_RELEASED;
    auto surfaceBufferSyncFence = bufferElement.buffer->GetSyncFence();
    if (fence != nullptr && surfaceBufferSyncFence != nullptr &&
        surfaceBufferSyncFence->IsValid()) {
        bufferElement.fence =
            SyncFence::MergeFence("SurfaceReleaseFence", surfaceBufferSyncFence, fence);
    } else {
        bufferElement.fence = fence;
    }
    int64_t now = std::chrono::duration_cast<std::chrono::nanoseconds>(
                      std::chrono::steady_clock::now().time_since_epoch())
                      .count();
    lastConsumeTime_ = now - bufferElement.lastAcquireTime;

    if (bufferElement.isDeleting) {
        DeleteBufferInCache(sequence, lock);
    } else {
        freeList_.push_back(sequence);
    }
    waitReqCon_.notify_all();
    waitAttachCon_.notify_all();
    return GSERROR_OK;
}

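// Allocates and maps a new SurfaceBufferImpl with the queue lock dropped (isAllocatingBuffer_ keeps
// other requests out), then caches it in REQUESTED state; protected buffers are cached without mapping.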
AllocBuffer(sptr<SurfaceBuffer> & buffer,const sptr<SurfaceBuffer> & previousBuffer,const BufferRequestConfig & config,std::unique_lock<std::mutex> & lock)1153 GSError BufferQueue::AllocBuffer(sptr<SurfaceBuffer> &buffer, const sptr<SurfaceBuffer>& previousBuffer,
1154     const BufferRequestConfig &config, std::unique_lock<std::mutex> &lock)
1155 {
1156     sptr<SurfaceBuffer> bufferImpl = new SurfaceBufferImpl();
1157     uint32_t sequence = bufferImpl->GetSeqNum();
1158     SURFACE_TRACE_NAME_FMT("AllocBuffer name: %s queueId: %" PRIu64 ", config width: %d height: %d usage: %llu format:"
1159         " %d id: %u", name_.c_str(), uniqueId_, config.width, config.height, config.usage, config.format, sequence);
1160     ScalingMode scalingMode = scalingMode_;
1161     int32_t connectedPid = connectedPid_;
1162     isAllocatingBuffer_ = true;
1163     lock.unlock();
1164     GSError ret = bufferImpl->Alloc(config, previousBuffer);
1165     lock.lock();
1166     isAllocatingBuffer_ = false;
1167     isAllocatingBufferCon_.notify_all();
1168     if (ret != GSERROR_OK) {
1169         BLOGE("Alloc failed, sequence:%{public}u, ret:%{public}d, uniqueId: %{public}" PRIu64 ".",
1170             sequence, ret, uniqueId_);
1171         return SURFACE_ERROR_UNKOWN;
1172     }
1173 
1174     bufferImpl->SetSurfaceBufferScalingMode(scalingMode);
1175     BufferElement ele = {
1176         .buffer = bufferImpl,
1177         .state = BUFFER_STATE_REQUESTED,
1178         .isDeleting = false,
1179         .config = config,
1180         .fence = SyncFence::InvalidFence(),
1181     };
1182 
1183     if (config.usage & BUFFER_USAGE_PROTECTED) {
1184         BLOGD("usage is BUFFER_USAGE_PROTECTED, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1185         bufferQueueCache_[sequence] = ele;
1186         buffer = bufferImpl;
1187         return SURFACE_ERROR_OK;
1188     }
1189 
1190     ret = bufferImpl->Map();
1191     if (ret == GSERROR_OK) {
1192         bufferQueueCache_[sequence] = ele;
1193         buffer = bufferImpl;
1194     } else {
1195         BLOGE("Map failed, seq:%{public}u, ret:%{public}d, uniqueId: %{public}" PRIu64 ".",
1196             sequence, ret, uniqueId_);
1197         return SURFACE_ERROR_UNKOWN;
1198     }
1199 
1200     BufferHandle* bufferHandle = bufferImpl->GetBufferHandle();
1201     if (connectedPid != 0 && bufferHandle != nullptr) {
1202         ioctl(bufferHandle->fd, DMA_BUF_SET_NAME_A, std::to_string(connectedPid).c_str());
1203     }
1204 
1205     return SURFACE_ERROR_OK;
1206 }
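
// Illustrative sketch (not part of the original file): the unlock/allocate/relock pattern
// used by AllocBuffer() above, shown in isolation. A busy flag plus a condition variable
// lets waiters such as DeleteBufferInCache() block until the slow allocation that runs
// outside the queue lock has finished. All names here are hypothetical.
static void ExampleAllocOutsideLock(std::unique_lock<std::mutex> &lock,
    std::condition_variable &allocDoneCon, bool &isAllocating)
{
    isAllocating = true;
    lock.unlock();              // never hold the queue mutex across a potentially slow Alloc()/Map()
    /* ... bufferImpl->Alloc(config, previousBuffer) would run here ... */
    lock.lock();
    isAllocating = false;
    allocDoneCon.notify_all();  // wake threads waiting on the "not allocating" predicate
}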
1207 
OnBufferDeleteForRS(uint32_t sequence)1208 void BufferQueue::OnBufferDeleteForRS(uint32_t sequence)
1209 {
1210     auto buffer = bufferQueueCache_[sequence].buffer;
1211     if (buffer) {
1212         buffer->SetBufferDeleteFromCacheFlag(true);
1213     }
1214     if (onBufferDeleteForRSMainThread_ != nullptr) {
1215         onBufferDeleteForRSMainThread_(sequence);
1216     }
1217     if (onBufferDeleteForRSHardwareThread_ != nullptr) {
1218         onBufferDeleteForRSHardwareThread_(sequence);
1219     }
1220 }
1221 
DeleteFreeListCacheLocked(uint32_t sequence)1222 void BufferQueue::DeleteFreeListCacheLocked(uint32_t sequence)
1223 {
1224     for (auto iter = freeList_.begin(); iter != freeList_.end(); ++iter) {
1225         if (sequence == *iter) {
1226             freeList_.erase(iter);
1227             break;
1228         }
1229     }
1230 }
1231 
DeleteBufferInCacheNoWaitForAllocatingState(uint32_t sequence)1232 void BufferQueue::DeleteBufferInCacheNoWaitForAllocatingState(uint32_t sequence)
1233 {
1234     auto it = bufferQueueCache_.find(sequence);
1235     if (it != bufferQueueCache_.end()) {
1236         BLOGD("DeleteBufferInCache seq: %{public}d, %{public}u, uniqueId: %{public}" PRIu64 ".",
1237             it->second.isPreAllocBuffer, sequence, uniqueId_);
1238         if (it->second.isPreAllocBuffer) {
1239             bufferQueueCache_.erase(it);
1240             lppBufferCache_.erase(sequence);
1241             DeleteFreeListCacheLocked(sequence);
1242             return;
1243         }
1244         OnBufferDeleteForRS(sequence);
1245         bufferQueueCache_.erase(it);
1246         lppBufferCache_.erase(sequence);
1247         deletingList_.push_back(sequence);
1248     }
1249 }
1250 
DeleteBufferInCache(uint32_t sequence,std::unique_lock<std::mutex> & lock)1251 void BufferQueue::DeleteBufferInCache(uint32_t sequence, std::unique_lock<std::mutex> &lock)
1252 {
1253     isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
1254     DeleteBufferInCacheNoWaitForAllocatingState(sequence);
1255 }
1256 
GetQueueSize()1257 uint32_t BufferQueue::GetQueueSize()
1258 {
1259     std::unique_lock<std::mutex> lock(mutex_);
1260     return bufferQueueSize_;
1261 }
1262 
DeleteBuffersLocked(int32_t count,std::unique_lock<std::mutex> & lock)1263 void BufferQueue::DeleteBuffersLocked(int32_t count, std::unique_lock<std::mutex> &lock)
1264 {
1265     SURFACE_TRACE_NAME_FMT("DeleteBuffersLocked count: %d", count);
1266     if (count <= 0) {
1267         return;
1268     }
1269 
1270     isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
1271     while (!freeList_.empty()) {
1272         DeleteBufferInCacheNoWaitForAllocatingState(freeList_.front());
1273         freeList_.pop_front();
1274         count--;
1275         if (count <= 0) {
1276             return;
1277         }
1278     }
1279 
1280     while (!dirtyList_.empty()) {
1281         DeleteBufferInCacheNoWaitForAllocatingState(dirtyList_.front());
1282         dirtyList_.pop_front();
1283         count--;
1284         if (count <= 0) {
1285             return;
1286         }
1287     }
1288 
1289     for (auto&& ele : bufferQueueCache_) {
1290         ele.second.isDeleting = true;
1291         // buffers still in use are only marked here; they are actually freed in ReleaseBufferLocked()
1292         count--;
1293         if (count <= 0) {
1294             break;
1295         }
1296     }
1297 }
1298 
AttachBufferUpdateStatus(std::unique_lock<std::mutex> & lock,uint32_t sequence,int32_t timeOut,std::map<uint32_t,BufferElement>::iterator & mapIter)1299 GSError BufferQueue::AttachBufferUpdateStatus(std::unique_lock<std::mutex> &lock, uint32_t sequence,
1300     int32_t timeOut, std::map<uint32_t, BufferElement>::iterator &mapIter)
1301 {
1302     BufferState state = mapIter->second.state;
1303     if (state == BUFFER_STATE_RELEASED) {
1304         mapIter->second.state = BUFFER_STATE_ATTACHED;
1305     } else {
1306         waitAttachCon_.wait_for(lock, std::chrono::milliseconds(timeOut),
1307             [&mapIter]() { return (mapIter->second.state == BUFFER_STATE_RELEASED); });
1308         if (mapIter->second.state == BUFFER_STATE_RELEASED) {
1309             mapIter->second.state = BUFFER_STATE_ATTACHED;
1310         } else {
1311             BLOGN_FAILURE_RET(SURFACE_ERROR_BUFFER_STATE_INVALID);
1312         }
1313     }
1314 
1315     DeleteFreeListCacheLocked(sequence);
1316     return GSERROR_OK;
1317 }
1318 
AttachBufferUpdateBufferInfo(sptr<SurfaceBuffer> & buffer,bool needMap)1319 void BufferQueue::AttachBufferUpdateBufferInfo(sptr<SurfaceBuffer>& buffer, bool needMap)
1320 {
1321     if (needMap) {
1322         buffer->Map();
1323     }
1324     buffer->SetSurfaceBufferWidth(buffer->GetWidth());
1325     buffer->SetSurfaceBufferHeight(buffer->GetHeight());
1326 }
1327 
AttachBufferToQueueLocked(sptr<SurfaceBuffer> buffer,InvokerType invokerType,bool needMap)1328 GSError BufferQueue::AttachBufferToQueueLocked(sptr<SurfaceBuffer> buffer, InvokerType invokerType, bool needMap)
1329 {
1330     uint32_t sequence = buffer->GetSeqNum();
1331     if (GetUsedSize() >= bufferQueueSize_) {
1332         BLOGE("seq: %{public}u, buffer queue size:%{public}u, used size:%{public}u,"
1333             "uniqueId: %{public}" PRIu64 ".", sequence, bufferQueueSize_, GetUsedSize(), uniqueId_);
1334         return SURFACE_ERROR_BUFFER_QUEUE_FULL;
1335     }
1336     auto mapIter = bufferQueueCache_.find(sequence);
1337     if (mapIter != bufferQueueCache_.end()) {
1338         BLOGE("seq: %{public}u, buffer is already in cache, uniqueId: %{public}" PRIu64 ".",
1339             sequence, uniqueId_);
1340         return SURFACE_ERROR_BUFFER_IS_INCACHE;
1341     }
1342     buffer->SetSurfaceBufferScalingMode(scalingMode_);
1343     BufferElement ele;
1344     ele = {
1345         .buffer = buffer,
1346         .isDeleting = false,
1347         .config = buffer->GetBufferRequestConfig(),
1348         .fence = SyncFence::InvalidFence(),
1349     };
1350     if (invokerType == InvokerType::PRODUCER_INVOKER) {
1351         ele.state = BUFFER_STATE_REQUESTED;
1352     } else {
1353         ele.state = BUFFER_STATE_ACQUIRED;
1354         if (detachReserveSlotNum_ > 0) {
1355             detachReserveSlotNum_--;
1356         }
1357     }
1358     AttachBufferUpdateBufferInfo(buffer, needMap);
1359     bufferQueueCache_[sequence] = ele;
1360     if (sourceType_ == OHSurfaceSource::OH_SURFACE_SOURCE_LOWPOWERVIDEO) {
1361         lppBufferCache_[sequence] = buffer;
1362     }
1363     return GSERROR_OK;
1364 }
1365 
AttachBufferToQueue(sptr<SurfaceBuffer> buffer,InvokerType invokerType)1366 GSError BufferQueue::AttachBufferToQueue(sptr<SurfaceBuffer> buffer, InvokerType invokerType)
1367 {
1368     SURFACE_TRACE_NAME_FMT("AttachBufferToQueue name: %s queueId: %" PRIu64 " sequence: %u invokerType: %u",
1369         name_.c_str(), uniqueId_, buffer->GetSeqNum(), invokerType);
1370     std::lock_guard<std::mutex> lockGuard(mutex_);
1371     return AttachBufferToQueueLocked(buffer, invokerType, true);
1372 }
1373 
DetachBufferFromQueueLocked(uint32_t sequence,InvokerType invokerType,std::unique_lock<std::mutex> & lock,bool isReserveSlot)1374 GSError BufferQueue::DetachBufferFromQueueLocked(uint32_t sequence, InvokerType invokerType,
1375     std::unique_lock<std::mutex> &lock, bool isReserveSlot)
1376 {
1377     auto mapIter = bufferQueueCache_.find(sequence);
1378     if (mapIter == bufferQueueCache_.end()) {
1379         BLOGE("seq: %{public}u, not find in cache, uniqueId: %{public}" PRIu64 ".",
1380             sequence, uniqueId_);
1381         return SURFACE_ERROR_BUFFER_NOT_INCACHE;
1382     }
1383     if (invokerType == InvokerType::PRODUCER_INVOKER) {
1384         if (mapIter->second.state != BUFFER_STATE_REQUESTED) {
1385             BLOGE("seq: %{public}u, state: %{public}d, uniqueId: %{public}" PRIu64 ".",
1386                 sequence, mapIter->second.state, uniqueId_);
1387             return SURFACE_ERROR_BUFFER_STATE_INVALID;
1388         }
1389         OnBufferDeleteForRS(sequence);
1390         bufferQueueCache_.erase(sequence);
1391         lppBufferCache_.erase(sequence);
1392     } else {
1393         if (mapIter->second.state != BUFFER_STATE_ACQUIRED) {
1394             BLOGE("seq: %{public}u, state: %{public}d, uniqueId: %{public}" PRIu64 ".",
1395                 sequence, mapIter->second.state, uniqueId_);
1396             return SURFACE_ERROR_BUFFER_STATE_INVALID;
1397         }
1398         DeleteBufferInCache(sequence, lock);
1399         if (isReserveSlot) {
1400             detachReserveSlotNum_++;
1401         }
1402     }
1403     return GSERROR_OK;
1404 }
1405 
DetachBufferFromQueue(sptr<SurfaceBuffer> buffer,InvokerType invokerType,bool isReserveSlot)1406 GSError BufferQueue::DetachBufferFromQueue(sptr<SurfaceBuffer> buffer, InvokerType invokerType, bool isReserveSlot)
1407 {
1408     SURFACE_TRACE_NAME_FMT("DetachBufferFromQueue name: %s queueId: %" PRIu64 ""
1409         "sequence: %u invokerType: %u isReserveSlot: %u",
1410         name_.c_str(), uniqueId_, buffer->GetSeqNum(), invokerType, isReserveSlot);
1411     std::unique_lock<std::mutex> lock(mutex_);
1412     uint32_t sequence = buffer->GetSeqNum();
1413     auto ret = DetachBufferFromQueueLocked(sequence, invokerType, lock, isReserveSlot);
1414     if (ret != GSERROR_OK) {
1415         return ret;
1416     }
1417 
1418     return GSERROR_OK;
1419 }
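
// Illustrative sketch (not part of the original file): migrating a producer-owned buffer
// between two queues with DetachBufferFromQueue() and AttachBufferToQueue(). srcQueue and
// dstQueue are hypothetical; a producer-side detach only succeeds while the buffer is in
// the REQUESTED state, and the destination records it as REQUESTED again after re-mapping.
static GSError ExampleMigrateBuffer(sptr<BufferQueue> &srcQueue, sptr<BufferQueue> &dstQueue,
    sptr<SurfaceBuffer> &buffer)
{
    GSError err = srcQueue->DetachBufferFromQueue(buffer, InvokerType::PRODUCER_INVOKER, false);
    if (err != GSERROR_OK) {
        return err;
    }
    return dstQueue->AttachBufferToQueue(buffer, InvokerType::PRODUCER_INVOKER);
}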
1420 
AttachBuffer(sptr<SurfaceBuffer> & buffer,int32_t timeOut)1421 GSError BufferQueue::AttachBuffer(sptr<SurfaceBuffer> &buffer, int32_t timeOut)
1422 {
1423     SURFACE_TRACE_NAME_FMT("%s", __func__);
1424     {
1425         std::lock_guard<std::mutex> lockGuard(mutex_);
1426         if (!GetStatusLocked()) {
1427             BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
1428         }
1429     }
1430     {
1431         std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1432         if (listener_ == nullptr && listenerClazz_ == nullptr) {
1433             BLOGN_FAILURE_RET(SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER);
1434         }
1435     }
1436 
1437     if (buffer == nullptr) {
1438         BLOGN_FAILURE_RET(GSERROR_INVALID_OPERATING);
1439     }
1440 
1441     uint32_t sequence = buffer->GetSeqNum();
1442     std::unique_lock<std::mutex> lock(mutex_);
1443     auto mapIter = bufferQueueCache_.find(sequence);
1444     if (mapIter != bufferQueueCache_.end()) {
1445         return AttachBufferUpdateStatus(lock, sequence, timeOut, mapIter);
1446     }
1447 
1448     buffer->SetSurfaceBufferScalingMode(scalingMode_);
1449     BufferElement ele = {
1450         .buffer = buffer,
1451         .state = BUFFER_STATE_ATTACHED,
1452         .config = {
1453             .width = buffer->GetWidth(), .height = buffer->GetHeight(), .strideAlignment = 0x8,
1454             .format = buffer->GetFormat(), .usage = buffer->GetUsage(), .timeout = timeOut,
1455         },
1456         .damages = { { .w = buffer->GetWidth(), .h = buffer->GetHeight(), } },
1457     };
1458     AttachBufferUpdateBufferInfo(buffer, true);
1459     int32_t usedSize = static_cast<int32_t>(GetUsedSize());
1460     int32_t queueSize = static_cast<int32_t>(bufferQueueSize_);
1461     if (usedSize >= queueSize) {
1462         int32_t freeSize = static_cast<int32_t>(dirtyList_.size() + freeList_.size());
1463         if (freeSize >= usedSize - queueSize + 1) {
1464             DeleteBuffersLocked(usedSize - queueSize + 1, lock);
1465             bufferQueueCache_[sequence] = ele;
1466             return GSERROR_OK;
1467         } else {
1468             BLOGN_FAILURE_RET(GSERROR_OUT_OF_RANGE);
1469         }
1470     } else {
1471         bufferQueueCache_[sequence] = ele;
1472         return GSERROR_OK;
1473     }
1474 }
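
// Worked example (illustrative): with bufferQueueSize_ = 3, GetUsedSize() = 3 and
// dirtyList_.size() + freeList_.size() = 2, attaching one more buffer needs
// usedSize - queueSize + 1 = 3 - 3 + 1 = 1 eviction; since 2 >= 1, DeleteBuffersLocked(1)
// frees a slot and the attach succeeds. If nothing were free or dirty, the call would
// fail with GSERROR_OUT_OF_RANGE instead.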
1475 
DetachBuffer(sptr<SurfaceBuffer> & buffer)1476 GSError BufferQueue::DetachBuffer(sptr<SurfaceBuffer> &buffer)
1477 {
1478     SURFACE_TRACE_NAME_FMT("%s", __func__);
1479     if (buffer == nullptr) {
1480         BLOGN_FAILURE_RET(GSERROR_INVALID_ARGUMENTS);
1481     }
1482 
1483     std::lock_guard<std::mutex> lockGuard(mutex_);
1484     uint32_t sequence = buffer->GetSeqNum();
1485     auto mapIter = bufferQueueCache_.find(sequence);
1486     if (mapIter == bufferQueueCache_.end()) {
1487         return GSERROR_NO_ENTRY;
1488     }
1489 
1490     if (mapIter->second.state == BUFFER_STATE_REQUESTED) {
1491         BLOGD("DetachBuffer requested seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
1492     } else if (mapIter->second.state == BUFFER_STATE_ACQUIRED) {
1493         BLOGD("DetachBuffer acquired seq: %{public}u, uniqueId: %{public}" PRIu64 ".", sequence, uniqueId_);
1494     } else {
1495         BLOGE("DetachBuffer invalid state: %{public}d, seq: %{public}u, uniqueId: %{public}" PRIu64 ".",
1496             mapIter->second.state, sequence, uniqueId_);
1497         return GSERROR_NO_ENTRY;
1498     }
1499     OnBufferDeleteForRS(sequence);
1500     bufferQueueCache_.erase(sequence);
1501     lppBufferCache_.erase(sequence);
1502     return GSERROR_OK;
1503 }
1504 
RegisterSurfaceDelegator(sptr<IRemoteObject> client,sptr<Surface> cSurface)1505 GSError BufferQueue::RegisterSurfaceDelegator(sptr<IRemoteObject> client, sptr<Surface> cSurface)
1506 {
1507     sptr<ConsumerSurfaceDelegator> surfaceDelegator = ConsumerSurfaceDelegator::Create();
1508     if (surfaceDelegator == nullptr) {
1509         BLOGE("Failed to register consumer delegator because the surface delegator is nullptr");
1510         return GSERROR_INVALID_ARGUMENTS;
1511     }
1512     if (!surfaceDelegator->SetClient(client)) {
1513         BLOGE("Failed to set client");
1514         return GSERROR_INVALID_ARGUMENTS;
1515     }
1516     if (!surfaceDelegator->SetBufferQueue(this)) {
1517         BLOGE("Failed to set bufferqueue");
1518         return GSERROR_INVALID_ARGUMENTS;
1519     }
1520 
1521     surfaceDelegator->SetSurface(cSurface);
1522     wpCSurfaceDelegator_ = surfaceDelegator;
1523     return GSERROR_OK;
1524 }
1525 
SetQueueSizeLocked(uint32_t queueSize,std::unique_lock<std::mutex> & lock)1526 GSError BufferQueue::SetQueueSizeLocked(uint32_t queueSize, std::unique_lock<std::mutex> &lock)
1527 {
1528     if (maxQueueSize_ != 0 && queueSize > maxQueueSize_) {
1529         BLOGD("queueSize(%{public}d) max than maxQueueSize_(%{public}d), uniqueId: %{public}" PRIu64,
1530             queueSize, maxQueueSize_, uniqueId_);
1531         queueSize = maxQueueSize_;
1532     }
1533     if (queueSize < detachReserveSlotNum_) {
1534         BLOGW("invalid queueSize: %{public}u, reserveSlotNum: %{public}u, uniqueId: %{public}" PRIu64 ".",
1535             queueSize, detachReserveSlotNum_, uniqueId_);
1536         return GSERROR_INVALID_ARGUMENTS;
1537     }
1538     if (bufferQueueSize_ > queueSize) {
1539         DeleteBuffersLocked(bufferQueueSize_ - queueSize, lock);
1540     }
1541     // if the queue size increases, wake up any threads blocked waiting for a free buffer
1542     if (queueSize > bufferQueueSize_) {
1543         bufferQueueSize_ = queueSize;
1544         waitReqCon_.notify_all();
1545     } else {
1546         bufferQueueSize_ = queueSize;
1547     }
1548     return GSERROR_OK;
1549 }
1550 
SetQueueSize(uint32_t queueSize)1551 GSError BufferQueue::SetQueueSize(uint32_t queueSize)
1552 {
1553     if (queueSize == 0 || queueSize > SURFACE_MAX_QUEUE_SIZE) {
1554         BLOGW("queue size: %{public}u, uniqueId: %{public}" PRIu64 ".", queueSize, uniqueId_);
1555         return GSERROR_INVALID_ARGUMENTS;
1556     }
1557     std::unique_lock<std::mutex> lock(mutex_);
1558     return SetQueueSizeLocked(queueSize, lock);
1559 }
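
// Illustrative sketch (not part of the original file): resizing the queue at run time.
// Growing wakes producers blocked waiting for a free slot; shrinking deletes surplus
// cached buffers via DeleteBuffersLocked(). The name ExampleResizeQueue and the sizes
// used below are assumptions for this example.
static void ExampleResizeQueue(sptr<BufferQueue> &queue)
{
    (void)queue->SetQueueSize(5);   // grow: waitReqCon_ is notified for blocked requesters
    (void)queue->SetQueueSize(2);   // shrink: up to 3 free/dirty buffers are released
}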
1560 
GetName(std::string & name)1561 GSError BufferQueue::GetName(std::string &name)
1562 {
1563     name = name_;
1564     return GSERROR_OK;
1565 }
1566 
RegisterConsumerListener(sptr<IBufferConsumerListener> & listener)1567 GSError BufferQueue::RegisterConsumerListener(sptr<IBufferConsumerListener> &listener)
1568 {
1569     std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1570     listener_ = listener;
1571     return GSERROR_OK;
1572 }
1573 
RegisterConsumerListener(IBufferConsumerListenerClazz * listener)1574 GSError BufferQueue::RegisterConsumerListener(IBufferConsumerListenerClazz *listener)
1575 {
1576     std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1577     listenerClazz_ = listener;
1578     return GSERROR_OK;
1579 }
1580 
UnregisterConsumerListener()1581 GSError BufferQueue::UnregisterConsumerListener()
1582 {
1583     std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1584     listener_ = nullptr;
1585     listenerClazz_ = nullptr;
1586     return GSERROR_OK;
1587 }
1588 
RegisterReleaseListener(OnReleaseFunc func)1589 GSError BufferQueue::RegisterReleaseListener(OnReleaseFunc func)
1590 {
1591     std::lock_guard<std::mutex> lockGuard(onBufferReleaseMutex_);
1592     onBufferRelease_ = func;
1593     return GSERROR_OK;
1594 }
1595 
RegisterProducerReleaseListener(sptr<IProducerListener> listener)1596 GSError BufferQueue::RegisterProducerReleaseListener(sptr<IProducerListener> listener)
1597 {
1598     std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1599     producerListener_ = listener;
1600     return GSERROR_OK;
1601 }
1602 
RegisterProducerPropertyListener(sptr<IProducerListener> listener,uint64_t producerId)1603 GSError BufferQueue::RegisterProducerPropertyListener(sptr<IProducerListener> listener, uint64_t producerId)
1604 {
1605     std::lock_guard<std::mutex> lockGuard(propertyChangeMutex_);
1606     return BufferUtilRegisterPropertyListener(listener, producerId, propertyChangeListeners_);
1607 }
1608 
UnRegisterProducerPropertyListener(uint64_t producerId)1609 GSError BufferQueue::UnRegisterProducerPropertyListener(uint64_t producerId)
1610 {
1611     std::lock_guard<std::mutex> lockGuard(propertyChangeMutex_);
1612     return BufferUtilUnRegisterPropertyListener(producerId, propertyChangeListeners_);
1613 }
1614 
RegisterProducerReleaseListenerBackup(sptr<IProducerListener> listener)1615 GSError BufferQueue::RegisterProducerReleaseListenerBackup(sptr<IProducerListener> listener)
1616 {
1617     std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1618     producerListenerBackup_ = listener;
1619     return GSERROR_OK;
1620 }
1621 
UnRegisterProducerReleaseListener()1622 GSError BufferQueue::UnRegisterProducerReleaseListener()
1623 {
1624     std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1625     producerListener_ = nullptr;
1626     return GSERROR_OK;
1627 }
1628 
UnRegisterProducerReleaseListenerBackup()1629 GSError BufferQueue::UnRegisterProducerReleaseListenerBackup()
1630 {
1631     std::lock_guard<std::mutex> lockGuard(producerListenerMutex_);
1632     producerListenerBackup_ = nullptr;
1633     return GSERROR_OK;
1634 }
1635 
RegisterDeleteBufferListener(OnDeleteBufferFunc func,bool isForUniRedraw)1636 GSError BufferQueue::RegisterDeleteBufferListener(OnDeleteBufferFunc func, bool isForUniRedraw)
1637 {
1638     std::lock_guard<std::mutex> lockGuard(mutex_);
1639     if (isForUniRedraw) {
1640         if (onBufferDeleteForRSHardwareThread_ != nullptr) {
1641             return GSERROR_OK;
1642         }
1643         onBufferDeleteForRSHardwareThread_ = func;
1644     } else {
1645         if (onBufferDeleteForRSMainThread_ != nullptr) {
1646             return GSERROR_OK;
1647         }
1648         onBufferDeleteForRSMainThread_ = func;
1649     }
1650     return GSERROR_OK;
1651 }
1652 
SetDefaultWidthAndHeight(int32_t width,int32_t height)1653 GSError BufferQueue::SetDefaultWidthAndHeight(int32_t width, int32_t height)
1654 {
1655     if (width <= 0) {
1656         BLOGW("width is %{public}d, uniqueId: %{public}" PRIu64 ".", width, uniqueId_);
1657         return GSERROR_INVALID_ARGUMENTS;
1658     }
1659 
1660     if (height <= 0) {
1661         BLOGW("height is %{public}d, uniqueId: %{public}" PRIu64 ".", height, uniqueId_);
1662         return GSERROR_INVALID_ARGUMENTS;
1663     }
1664     std::lock_guard<std::mutex> lockGuard(mutex_);
1665     defaultWidth_ = width;
1666     defaultHeight_ = height;
1667     return GSERROR_OK;
1668 }
1669 
GetDefaultWidth()1670 int32_t BufferQueue::GetDefaultWidth()
1671 {
1672     std::lock_guard<std::mutex> lockGuard(mutex_);
1673     return defaultWidth_;
1674 }
1675 
GetDefaultHeight()1676 int32_t BufferQueue::GetDefaultHeight()
1677 {
1678     std::lock_guard<std::mutex> lockGuard(mutex_);
1679     return defaultHeight_;
1680 }
1681 
SetDefaultUsage(uint64_t usage)1682 GSError BufferQueue::SetDefaultUsage(uint64_t usage)
1683 {
1684     std::lock_guard<std::mutex> lockGuard(mutex_);
1685     defaultUsage_ = usage;
1686     return GSERROR_OK;
1687 }
1688 
GetDefaultUsage()1689 uint64_t BufferQueue::GetDefaultUsage()
1690 {
1691     std::lock_guard<std::mutex> lockGuard(mutex_);
1692     return defaultUsage_;
1693 }
1694 
ClearLocked(std::unique_lock<std::mutex> & lock)1695 void BufferQueue::ClearLocked(std::unique_lock<std::mutex> &lock)
1696 {
1697     isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
1698     for (auto &[id, _] : bufferQueueCache_) {
1699         OnBufferDeleteForRS(id);
1700     }
1701     bufferQueueCache_.clear();
1702     freeList_.clear();
1703     lppBufferCache_.clear();
1704     dirtyList_.clear();
1705     deletingList_.clear();
1706 }
1707 
GoBackground()1708 GSError BufferQueue::GoBackground()
1709 {
1710     sptr<IBufferConsumerListener> listener;
1711     IBufferConsumerListenerClazz *listenerClazz;
1712     {
1713         std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1714         listener = listener_;
1715         listenerClazz = listenerClazz_;
1716     }
1717     if (listener != nullptr) {
1718         SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1719         listener->OnGoBackground();
1720     } else if (listenerClazz != nullptr) {
1721         SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1722         listenerClazz->OnGoBackground();
1723     }
1724     std::unique_lock<std::mutex> lock(mutex_);
1725     ClearLocked(lock);
1726     waitReqCon_.notify_all();
1727     SetProducerCacheCleanFlagLocked(false, lock);
1728     return GSERROR_OK;
1729 }
1730 
CleanCache(bool cleanAll,uint32_t * bufSeqNum)1731 GSError BufferQueue::CleanCache(bool cleanAll, uint32_t *bufSeqNum)
1732 {
1733     sptr<IBufferConsumerListener> listener;
1734     IBufferConsumerListenerClazz *listenerClazz;
1735     {
1736         std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1737         listener = listener_;
1738         listenerClazz = listenerClazz_;
1739     }
1740     if (cleanAll) {
1741         if (listener != nullptr) {
1742             SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1743             listener->OnGoBackground();
1744         } else if (listenerClazz != nullptr) {
1745             SURFACE_TRACE_NAME_FMT("OnGoBackground name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1746             listenerClazz->OnGoBackground();
1747         }
1748     } else {
1749         if (listener != nullptr) {
1750             SURFACE_TRACE_NAME_FMT("OnCleanCache name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1751             listener->OnCleanCache(bufSeqNum);
1752         } else if (listenerClazz != nullptr) {
1753             SURFACE_TRACE_NAME_FMT("OnCleanCache name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
1754             listenerClazz->OnCleanCache(bufSeqNum);
1755         }
1756     }
1757     std::unique_lock<std::mutex> lock(mutex_);
1758     if (!cleanAll && bufSeqNum != nullptr) {
1759         MarkBufferReclaimableByIdLocked(*bufSeqNum);
1760     }
1761     ClearLocked(lock);
1762     waitReqCon_.notify_all();
1763     return GSERROR_OK;
1764 }
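
// Illustrative sketch (not part of the original file): both calls below empty the cache,
// but GoBackground() notifies the consumer via OnGoBackground(), whereas a targeted
// CleanCache(false, &seq) notifies via OnCleanCache() and first marks the given sequence
// as reclaimable. queue and keepSeq are hypothetical names.
static void ExampleCleanUp(sptr<BufferQueue> &queue, uint32_t keepSeq)
{
    (void)queue->CleanCache(false, &keepSeq);   // targeted clean, consumer gets OnCleanCache(&keepSeq)
    (void)queue->GoBackground();                // full teardown, consumer gets OnGoBackground()
}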
1765 
OnConsumerDied()1766 GSError BufferQueue::OnConsumerDied()
1767 {
1768     std::unique_lock<std::mutex> lock(mutex_);
1769     ClearLocked(lock);
1770     waitReqCon_.notify_all();
1771     return GSERROR_OK;
1772 }
1773 
IsSurfaceBufferInCache(uint32_t seqNum,bool & isInCache)1774 GSError BufferQueue::IsSurfaceBufferInCache(uint32_t seqNum, bool &isInCache)
1775 {
1776     std::unique_lock<std::mutex> lock(mutex_);
1777     if (bufferQueueCache_.find(seqNum) != bufferQueueCache_.end()) {
1778         isInCache = true;
1779     } else {
1780         isInCache = false;
1781     }
1782     return GSERROR_OK;
1783 }
1784 
GetUniqueId() const1785 uint64_t BufferQueue::GetUniqueId() const
1786 {
1787     std::unique_lock<std::mutex> lock(mutex_);
1788     return uniqueId_;
1789 }
1790 
SetTransform(GraphicTransformType transform)1791 GSError BufferQueue::SetTransform(GraphicTransformType transform)
1792 {
1793     {
1794         std::unique_lock<std::mutex> lock(mutex_);
1795         if (transform_ == transform) {
1796             return GSERROR_OK;
1797         }
1798 
1799         transform_ = transform;
1800     }
1801     sptr<IBufferConsumerListener> listener;
1802     IBufferConsumerListenerClazz *listenerClazz;
1803     {
1804         std::lock_guard<std::mutex> lockGuard(listenerMutex_);
1805         listener = listener_;
1806         listenerClazz = listenerClazz_;
1807     }
1808     if (listener != nullptr) {
1809         SURFACE_TRACE_NAME_FMT("OnTransformChange transform: %u", transform);
1810         listener->OnTransformChange();
1811     } else if (listenerClazz != nullptr) {
1812         SURFACE_TRACE_NAME_FMT("OnTransformChange transform: %u", transform);
1813         listenerClazz->OnTransformChange();
1814     }
1815     return GSERROR_OK;
1816 }
1817 
GetTransform() const1818 GraphicTransformType BufferQueue::GetTransform() const
1819 {
1820     std::unique_lock<std::mutex> lock(mutex_);
1821     return transform_;
1822 }
1823 
SetTransformHint(GraphicTransformType transformHint,uint64_t producerId)1824 GSError BufferQueue::SetTransformHint(GraphicTransformType transformHint, uint64_t producerId)
1825 {
1826     {
1827         std::unique_lock<std::mutex> lock(mutex_);
1828         if (transformHint_ != transformHint) {
1829             transformHint_ = transformHint;
1830         } else {
1831             return GSERROR_OK;
1832         }
1833     }
1834 
1835     std::map<uint64_t, sptr<IProducerListener>> propertyListeners;
1836     {
1837         std::lock_guard<std::mutex> lockGuard(propertyChangeMutex_);
1838         if (propertyChangeListeners_.empty()) {
1839             return GSERROR_OK;
1840         }
1841         propertyListeners = propertyChangeListeners_;
1842     }
1843     SurfaceProperty property = {
1844         .transformHint = transformHint,
1845     };
1846     for (const auto& item: propertyListeners) {
1847         SURFACE_TRACE_NAME_FMT("propertyListeners %u, val %d", item.first, (int)property.transformHint);
1848         if (producerId == item.first) {
1849             continue;
1850         }
1851         if (item.second->OnPropertyChange(property) != GSERROR_OK) {
1852             BLOGE("OnPropertyChange failed, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1853         }
1854     }
1855     return GSERROR_OK;
1856 }
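
// Illustrative sketch (not part of the original file): a producer that registers a property
// listener only hears about transform-hint changes made by other producers, because the
// caller's own producerId is skipped in the loop above. ExampleListenForTransformHint,
// queue and myProducerId are hypothetical; GRAPHIC_ROTATE_90 is assumed to be one of the
// standard GraphicTransformType values.
static GSError ExampleListenForTransformHint(sptr<BufferQueue> &queue,
    const sptr<IProducerListener> &listener, uint64_t myProducerId)
{
    GSError err = queue->RegisterProducerPropertyListener(listener, myProducerId);
    if (err != GSERROR_OK) {
        return err;
    }
    // A later change made by a different producer triggers listener->OnPropertyChange().
    return queue->SetTransformHint(GraphicTransformType::GRAPHIC_ROTATE_90, myProducerId);
}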
1857 
GetTransformHint() const1858 GraphicTransformType BufferQueue::GetTransformHint() const
1859 {
1860     std::unique_lock<std::mutex> lock(mutex_);
1861     return transformHint_;
1862 }
1863 
SetSurfaceSourceType(OHSurfaceSource sourceType)1864 GSError BufferQueue::SetSurfaceSourceType(OHSurfaceSource sourceType)
1865 {
1866     std::unique_lock<std::mutex> lock(mutex_);
1867     sourceType_ = sourceType;
1868     return GSERROR_OK;
1869 }
1870 
GetSurfaceSourceType() const1871 OHSurfaceSource BufferQueue::GetSurfaceSourceType() const
1872 {
1873     std::unique_lock<std::mutex> lock(mutex_);
1874     return sourceType_;
1875 }
1876 
SetHdrWhitePointBrightness(float brightness)1877 GSError BufferQueue::SetHdrWhitePointBrightness(float brightness)
1878 {
1879     std::unique_lock<std::mutex> lock(mutex_);
1880     hdrWhitePointBrightness_ = brightness;
1881     return GSERROR_OK;
1882 }
1883 
SetSdrWhitePointBrightness(float brightness)1884 GSError BufferQueue::SetSdrWhitePointBrightness(float brightness)
1885 {
1886     std::unique_lock<std::mutex> lock(mutex_);
1887     sdrWhitePointBrightness_ = brightness;
1888     return GSERROR_OK;
1889 }
1890 
GetHdrWhitePointBrightness() const1891 float BufferQueue::GetHdrWhitePointBrightness() const
1892 {
1893     std::unique_lock<std::mutex> lock(mutex_);
1894     return hdrWhitePointBrightness_;
1895 }
1896 
GetSdrWhitePointBrightness() const1897 float BufferQueue::GetSdrWhitePointBrightness() const
1898 {
1899     std::unique_lock<std::mutex> lock(mutex_);
1900     return sdrWhitePointBrightness_;
1901 }
1902 
SetSurfaceAppFrameworkType(std::string appFrameworkType)1903 GSError BufferQueue::SetSurfaceAppFrameworkType(std::string appFrameworkType)
1904 {
1905     if (appFrameworkType.empty()) {
1906         return GSERROR_NO_ENTRY;
1907     }
1908     if (appFrameworkType.size() > MAXIMUM_LENGTH_OF_APP_FRAMEWORK) {
1909         return GSERROR_OUT_OF_RANGE;
1910     }
1911     std::unique_lock<std::mutex> lock(mutex_);
1912     appFrameworkType_ = appFrameworkType;
1913     return GSERROR_OK;
1914 }
1915 
GetSurfaceAppFrameworkType() const1916 std::string BufferQueue::GetSurfaceAppFrameworkType() const
1917 {
1918     std::unique_lock<std::mutex> lock(mutex_);
1919     return appFrameworkType_;
1920 }
1921 
SetBufferHold(bool hold)1922 GSError BufferQueue::SetBufferHold(bool hold)
1923 {
1924     std::unique_lock<std::mutex> lock(mutex_);
1925     isBufferHold_ = hold;
1926     return GSERROR_OK;
1927 }
1928 
SetBufferReallocFlag(bool flag)1929 GSError BufferQueue::SetBufferReallocFlag(bool flag)
1930 {
1931     std::unique_lock<std::mutex> lock(mutex_);
1932     for (auto mapIter = bufferQueueCache_.begin(); mapIter != bufferQueueCache_.end(); ++mapIter) {
1933         mapIter->second.isBufferNeedRealloc = flag;
1934     }
1935     return GSERROR_OK;
1936 }
1937 
SetBufferName(const std::string & bufferName)1938 GSError BufferQueue::SetBufferName(const std::string &bufferName)
1939 {
1940     std::unique_lock<std::mutex> lock(mutex_);
1941     bufferName_ = bufferName;
1942     return GSERROR_OK;
1943 }
1944 
SetScalingMode(uint32_t sequence,ScalingMode scalingMode)1945 GSError BufferQueue::SetScalingMode(uint32_t sequence, ScalingMode scalingMode)
1946 {
1947     std::lock_guard<std::mutex> lockGuard(mutex_);
1948     auto mapIter = bufferQueueCache_.find(sequence);
1949     if (mapIter == bufferQueueCache_.end()) {
1950         return GSERROR_NO_ENTRY;
1951     }
1952     mapIter->second.buffer->SetSurfaceBufferScalingMode(scalingMode);
1953     return GSERROR_OK;
1954 }
1955 
SetScalingMode(ScalingMode scalingMode)1956 GSError BufferQueue::SetScalingMode(ScalingMode scalingMode)
1957 {
1958     std::lock_guard<std::mutex> lockGuard(mutex_);
1959     for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
1960         it->second.buffer->SetSurfaceBufferScalingMode(scalingMode);
1961     }
1962     scalingMode_ = scalingMode;
1963     return GSERROR_OK;
1964 }
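
// Illustrative sketch (not part of the original file): the queue-wide overload above rewrites
// every cached buffer and becomes the default for buffers allocated afterwards, while the
// per-sequence overload changes only one cached buffer. queue and seq are hypothetical; the
// ScalingMode values are assumed standard graphic_common modes.
static void ExampleScalingModes(sptr<BufferQueue> &queue, uint32_t seq)
{
    (void)queue->SetScalingMode(ScalingMode::SCALING_MODE_SCALE_TO_WINDOW);  // queue-wide default
    (void)queue->SetScalingMode(seq, ScalingMode::SCALING_MODE_SCALE_CROP);  // one buffer only
}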
1965 
GetScalingMode(uint32_t sequence,ScalingMode & scalingMode)1966 GSError BufferQueue::GetScalingMode(uint32_t sequence, ScalingMode &scalingMode)
1967 {
1968     std::lock_guard<std::mutex> lockGuard(mutex_);
1969     auto mapIter = bufferQueueCache_.find(sequence);
1970     if (mapIter == bufferQueueCache_.end()) {
1971         return GSERROR_NO_ENTRY;
1972     }
1973     scalingMode = mapIter->second.buffer->GetSurfaceBufferScalingMode();
1974     return GSERROR_OK;
1975 }
1976 
SetMetaData(uint32_t sequence,const std::vector<GraphicHDRMetaData> & metaData)1977 GSError BufferQueue::SetMetaData(uint32_t sequence, const std::vector<GraphicHDRMetaData> &metaData)
1978 {
1979     std::lock_guard<std::mutex> lockGuard(mutex_);
1980     if (metaData.size() == 0) {
1981         BLOGW("metaData size is 0, uniqueId: %{public}" PRIu64 ".", uniqueId_);
1982         return GSERROR_INVALID_ARGUMENTS;
1983     }
1984     auto mapIter = bufferQueueCache_.find(sequence);
1985     if (mapIter == bufferQueueCache_.end()) {
1986         return GSERROR_NO_ENTRY;
1987     }
1988     mapIter->second.metaData.clear();
1989     mapIter->second.metaData = metaData;
1990     mapIter->second.hdrMetaDataType = HDRMetaDataType::HDR_META_DATA;
1991     return GSERROR_OK;
1992 }
1993 
SetMetaDataSet(uint32_t sequence,GraphicHDRMetadataKey key,const std::vector<uint8_t> & metaData)1994 GSError BufferQueue::SetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey key,
1995                                     const std::vector<uint8_t> &metaData)
1996 {
1997     std::lock_guard<std::mutex> lockGuard(mutex_);
1998     if (key < GraphicHDRMetadataKey::GRAPHIC_MATAKEY_RED_PRIMARY_X ||
1999         key > GraphicHDRMetadataKey::GRAPHIC_MATAKEY_HDR_VIVID) {
2000         BLOGW("key is %{public}d, uniqueId: %{public}" PRIu64 ".", key, uniqueId_);
2001         return GSERROR_INVALID_ARGUMENTS;
2002     }
2003     if (metaData.size() == 0) {
2004         BLOGW("metaData size is 0, uniqueId: %{public}" PRIu64 ".", uniqueId_);
2005         return GSERROR_INVALID_ARGUMENTS;
2006     }
2007     auto mapIter = bufferQueueCache_.find(sequence);
2008     if (mapIter == bufferQueueCache_.end()) {
2009         return GSERROR_NO_ENTRY;
2010     }
2011     mapIter->second.metaDataSet.clear();
2012     mapIter->second.key = key;
2013     mapIter->second.metaDataSet = metaData;
2014     mapIter->second.hdrMetaDataType = HDRMetaDataType::HDR_META_DATA_SET;
2015     return GSERROR_OK;
2016 }
2017 
QueryMetaDataType(uint32_t sequence,HDRMetaDataType & type)2018 GSError BufferQueue::QueryMetaDataType(uint32_t sequence, HDRMetaDataType &type)
2019 {
2020     std::lock_guard<std::mutex> lockGuard(mutex_);
2021     auto mapIter = bufferQueueCache_.find(sequence);
2022     if (mapIter == bufferQueueCache_.end()) {
2023         return GSERROR_NO_ENTRY;
2024     }
2025     type = mapIter->second.hdrMetaDataType;
2026     return GSERROR_OK;
2027 }
2028 
GetMetaData(uint32_t sequence,std::vector<GraphicHDRMetaData> & metaData)2029 GSError BufferQueue::GetMetaData(uint32_t sequence, std::vector<GraphicHDRMetaData> &metaData)
2030 {
2031     std::lock_guard<std::mutex> lockGuard(mutex_);
2032     auto mapIter = bufferQueueCache_.find(sequence);
2033     if (mapIter == bufferQueueCache_.end()) {
2034         return GSERROR_NO_ENTRY;
2035     }
2036     metaData.clear();
2037     metaData = mapIter->second.metaData;
2038     return GSERROR_OK;
2039 }
2040 
GetMetaDataSet(uint32_t sequence,GraphicHDRMetadataKey & key,std::vector<uint8_t> & metaData)2041 GSError BufferQueue::GetMetaDataSet(uint32_t sequence, GraphicHDRMetadataKey &key,
2042                                     std::vector<uint8_t> &metaData)
2043 {
2044     std::lock_guard<std::mutex> lockGuard(mutex_);
2045     auto mapIter = bufferQueueCache_.find(sequence);
2046     if (mapIter == bufferQueueCache_.end()) {
2047         return GSERROR_NO_ENTRY;
2048     }
2049     metaData.clear();
2050     key = mapIter->second.key;
2051     metaData = mapIter->second.metaDataSet;
2052     return GSERROR_OK;
2053 }
2054 
SetTunnelHandle(const sptr<SurfaceTunnelHandle> & handle)2055 GSError BufferQueue::SetTunnelHandle(const sptr<SurfaceTunnelHandle> &handle)
2056 {
2057     std::lock_guard<std::mutex> lockGuard(mutex_);
2058     bool tunnelHandleChange = false;
2059     if (tunnelHandle_ == nullptr) {
2060         if (handle == nullptr) {
2061             BLOGW("tunnel handle is nullptr, uniqueId: %{public}" PRIu64 ".", uniqueId_);
2062             return GSERROR_INVALID_ARGUMENTS;
2063         }
2064         tunnelHandleChange = true;
2065     } else {
2066         tunnelHandleChange = tunnelHandle_->Different(handle);
2067     }
2068     if (!tunnelHandleChange) {
2069         BLOGW("same tunnel handle, uniqueId: %{public}" PRIu64 ".", uniqueId_);
2070         return GSERROR_NO_ENTRY;
2071     }
2072     tunnelHandle_ = handle;
2073     sptr<IBufferConsumerListener> listener;
2074     IBufferConsumerListenerClazz *listenerClazz;
2075     {
2076         std::lock_guard<std::mutex> lockGuard(listenerMutex_);
2077         listener = listener_;
2078         listenerClazz = listenerClazz_;
2079     }
2080     if (listener != nullptr) {
2081         SURFACE_TRACE_NAME("OnTunnelHandleChange");
2082         listener->OnTunnelHandleChange();
2083     } else if (listenerClazz != nullptr) {
2084         SURFACE_TRACE_NAME("OnTunnelHandleChange");
2085         listenerClazz->OnTunnelHandleChange();
2086     } else {
2087         return SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER;
2088     }
2089     return GSERROR_OK;
2090 }
2091 
GetTunnelHandle()2092 sptr<SurfaceTunnelHandle> BufferQueue::GetTunnelHandle()
2093 {
2094     std::lock_guard<std::mutex> lockGuard(mutex_);
2095     return tunnelHandle_;
2096 }
2097 
SetPresentTimestamp(uint32_t sequence,const GraphicPresentTimestamp & timestamp)2098 GSError BufferQueue::SetPresentTimestamp(uint32_t sequence, const GraphicPresentTimestamp &timestamp)
2099 {
2100     std::lock_guard<std::mutex> lockGuard(mutex_);
2101     auto mapIter = bufferQueueCache_.find(sequence);
2102     if (mapIter == bufferQueueCache_.end()) {
2103         return GSERROR_NO_ENTRY;
2104     }
2105     mapIter->second.presentTimestamp = timestamp;
2106     return GSERROR_OK;
2107 }
2108 
GetPresentTimestamp(uint32_t sequence,GraphicPresentTimestampType type,int64_t & time)2109 GSError BufferQueue::GetPresentTimestamp(uint32_t sequence, GraphicPresentTimestampType type, int64_t &time)
2110 {
2111     std::lock_guard<std::mutex> lockGuard(mutex_);
2112     auto mapIter = bufferQueueCache_.find(sequence);
2113     if (mapIter == bufferQueueCache_.end()) {
2114         return GSERROR_NO_ENTRY;
2115     }
2116     if (type != mapIter->second.presentTimestamp.type) {
2117         BLOGE("seq: %{public}u, PresentTimestampType [%{public}d] is not supported, the supported type is [%{public}d],"
2118             "uniqueId: %{public}" PRIu64 ".", sequence, type,
2119             mapIter->second.presentTimestamp.type, uniqueId_);
2120         return GSERROR_NO_ENTRY;
2121     }
2122     switch (type) {
2123         case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_DELAY: {
2124             time = mapIter->second.presentTimestamp.time;
2125             return GSERROR_OK;
2126         }
2127         case GraphicPresentTimestampType::GRAPHIC_DISPLAY_PTS_TIMESTAMP: {
2128             time = mapIter->second.presentTimestamp.time - mapIter->second.timestamp;
2129             return GSERROR_OK;
2130         }
2131         default: {
2132             BLOGE("seq: %{public}u, unsupported type: %{public}d, uniqueId: %{public}" PRIu64 ".",
2133                 sequence, type, uniqueId_);
2134             return GSERROR_TYPE_ERROR;
2135         }
2136     }
2137 }
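
// Worked example (illustrative): if a buffer was flushed with timestamp = 100000 ns and the
// stored presentTimestamp is { GRAPHIC_DISPLAY_PTS_TIMESTAMP, 16600000 ns }, querying that
// type yields 16600000 - 100000 = 16500000 ns, i.e. the flush-to-present latency. Had the
// stored type been GRAPHIC_DISPLAY_PTS_DELAY, the raw time value would be returned as-is;
// asking for a type other than the stored one fails with GSERROR_NO_ENTRY.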
2138 
SetSurfaceBufferGlobalAlphaUnlocked(sptr<SurfaceBuffer> buffer)2139 void BufferQueue::SetSurfaceBufferGlobalAlphaUnlocked(sptr<SurfaceBuffer> buffer)
2140 {
2141     std::lock_guard<std::mutex> lockGuard(globalAlphaMutex_);
2142     if (globalAlpha_ < FORCE_GLOBAL_ALPHA_MIN || globalAlpha_ > FORCE_GLOBAL_ALPHA_MAX) {
2143         BLOGE("Invalid global alpha value: %{public}d, uniqueId: %{public}" PRIu64 ".", globalAlpha_, uniqueId_);
2144         return;
2145     }
2146     using namespace HDI::Display::Graphic::Common;
2147     V2_0::BufferHandleAttrKey key = V2_0::BufferHandleAttrKey::ATTRKEY_FORCE_GLOBAL_ALPHA;
2148     std::vector<uint8_t> values;
2149     auto ret = MetadataHelper::ConvertMetadataToVec(globalAlpha_, values);
2150     if (ret != GSERROR_OK) {
2151         BLOGE("Convert global alpha value failed, ret: %{public}d, value: %{public}d, uniqueId: %{public}" PRIu64 ".",
2152             ret, globalAlpha_, uniqueId_);
2153         return;
2154     }
2155     buffer->SetMetadata(key, values);
2156 }
2157 
SetGlobalAlpha(int32_t alpha)2158 GSError BufferQueue::SetGlobalAlpha(int32_t alpha)
2159 {
2160     std::lock_guard<std::mutex> lockGuard(globalAlphaMutex_);
2161     globalAlpha_ = alpha;
2162     return GSERROR_OK;
2163 }
2164 
GetGlobalAlpha(int32_t & alpha)2165 GSError BufferQueue::GetGlobalAlpha(int32_t &alpha)
2166 {
2167     std::lock_guard<std::mutex> lockGuard(globalAlphaMutex_);
2168     alpha = globalAlpha_;
2169     return GSERROR_OK;
2170 }
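
// Illustrative sketch (not part of the original file): forcing a layer-wide alpha. Only
// values in [FORCE_GLOBAL_ALPHA_MIN, FORCE_GLOBAL_ALPHA_MAX] are later written into the
// buffer metadata by SetSurfaceBufferGlobalAlphaUnlocked(); the assumption here is that
// -1 (the minimum) means "no forced alpha". queue is a hypothetical name.
static void ExampleForceGlobalAlpha(sptr<BufferQueue> &queue)
{
    (void)queue->SetGlobalAlpha(128);   // force a mid-range alpha on every flushed buffer
    int32_t current = 0;
    (void)queue->GetGlobalAlpha(current);
    (void)queue->SetGlobalAlpha(-1);    // assumed to restore default (non-forced) alpha handling
}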
2171 
SetRequestBufferNoblockMode(bool noblock)2172 GSError BufferQueue::SetRequestBufferNoblockMode(bool noblock)
2173 {
2174     std::lock_guard<std::mutex> lockGuard(mutex_);
2175     requestBufferNoBlockMode_ = noblock;
2176     return GSERROR_OK;
2177 }
2178 
GetRequestBufferNoblockMode(bool & noblock)2179 GSError BufferQueue::GetRequestBufferNoblockMode(bool &noblock)
2180 {
2181     std::lock_guard<std::mutex> lockGuard(mutex_);
2182     noblock = requestBufferNoBlockMode_;
2183     return GSERROR_OK;
2184 }
2185 
DumpMetadata(std::string & result,BufferElement element)2186 void BufferQueue::DumpMetadata(std::string &result, BufferElement element)
2187 {
2188     HDI::Display::Graphic::Common::V1_0::CM_ColorSpaceType colorSpaceType;
2189     MetadataHelper::GetColorSpaceType(element.buffer, colorSpaceType);
2190     HDI::Display::Graphic::Common::V1_0::CM_HDR_Metadata_Type hdrMetadataType =
2191         HDI::Display::Graphic::Common::V1_0::CM_METADATA_NONE;
2192     std::vector<uint8_t> dataStatic;
2193     std::vector<uint8_t> dataDynamic;
2194     MetadataHelper::GetHDRDynamicMetadata(element.buffer, dataDynamic);
2195     MetadataHelper::GetHDRStaticMetadata(element.buffer, dataStatic);
2196     MetadataHelper::GetHDRMetadataType(element.buffer, hdrMetadataType);
2197     result += std::to_string(colorSpaceType) + ", ";
2198     result += " [staticMetadata: ";
2199     for (auto x : dataStatic) {
2200         result += std::to_string(x);
2201         result += " ";
2202     }
2203     result += " ],[dynamicMetadata: ";
2204     for (auto x : dataDynamic) {
2205         result += std::to_string(x);
2206         result += " ";
2207     }
2208     result += " ],[metadataType: ";
2209     result += std::to_string(hdrMetadataType) + "],";
2210 }
2211 
DumpCache(std::string & result)2212 void BufferQueue::DumpCache(std::string &result)
2213 {
2214     for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
2215         BufferElement element = it->second;
2216         if (BufferStateStrs.find(element.state) != BufferStateStrs.end()) {
2217             result += "        sequence = " + std::to_string(it->first) +
2218                 ", state = " + std::to_string(element.state) +
2219                 ", timestamp = " + std::to_string(element.timestamp);
2220         }
2221         for (decltype(element.damages.size()) i = 0; i < element.damages.size(); i++) {
2222             result += ", damagesRect = [" + std::to_string(i) + "] = [" +
2223             std::to_string(element.damages[i].x) + ", " +
2224             std::to_string(element.damages[i].y) + ", " +
2225             std::to_string(element.damages[i].w) + ", " +
2226             std::to_string(element.damages[i].h) + "],";
2227         }
2228         result += " config = [" + std::to_string(element.config.width) + "x" +
2229             std::to_string(element.config.height) + ", " +
2230             std::to_string(element.config.strideAlignment) + ", " +
2231             std::to_string(element.config.format) +", " +
2232             std::to_string(element.config.usage) + ", " +
2233             std::to_string(element.config.timeout) + ", " +
2234             std::to_string(element.config.colorGamut) + ", " +
2235             std::to_string(element.config.transform) + "],";
2236         DumpMetadata(result, element);
2237         result += " scalingMode = " + std::to_string(element.buffer->GetSurfaceBufferScalingMode()) + ",";
2238         result += " HDR = " + std::to_string(element.hdrMetaDataType) + ", ";
2239 
2240         double bufferMemSize = 0;
2241         result += " bufferWith = " + std::to_string(element.buffer->GetWidth()) +
2242                 ", bufferHeight = " + std::to_string(element.buffer->GetHeight());
2243         bufferMemSize = static_cast<double>(element.buffer->GetSize()) / BUFFER_MEMSIZE_RATE;
2244 
2245         std::ostringstream ss;
2246         ss.precision(BUFFER_MEMSIZE_FORMAT);
2247         ss.setf(std::ios::fixed);
2248         ss << bufferMemSize;
2249         std::string str = ss.str();
2250         result += ", bufferMemSize = " + str + "(KiB).\n";
2251     }
2252 }
2253 
Dump(std::string & result)2254 void BufferQueue::Dump(std::string &result)
2255 {
2256     std::unique_lock<std::mutex> lock(mutex_);
2257     std::ostringstream ss;
2258     ss.precision(BUFFER_MEMSIZE_FORMAT);
2259     ss.setf(std::ios::fixed);
2260     static double allSurfacesMemSize = 0;
2261     uint64_t totalBufferListSize = 0;
2262     double memSizeInKB = 0;
2263 
2264     isAllocatingBufferCon_.wait(lock, [this]() { return !isAllocatingBuffer_; });
2265     for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
2266         BufferElement element = it->second;
2267         if (element.buffer != nullptr) {
2268             totalBufferListSize += element.buffer->GetSize();
2269         }
2270     }
2271     memSizeInKB = static_cast<double>(totalBufferListSize) / BUFFER_MEMSIZE_RATE;
2272 
2273     allSurfacesMemSize += memSizeInKB;
2274     uint32_t resultLen = result.size();
2275     std::string dumpEndFlag = "dumpend";
2276     if (resultLen > dumpEndFlag.size() && resultLen > 1) {
2277         std::string dumpEndIn(result, resultLen - dumpEndFlag.size(), resultLen - 1);
2278         if (dumpEndIn == dumpEndFlag) {
2279             ss << allSurfacesMemSize;
2280             std::string dumpEndStr = ss.str();
2281             result.erase(resultLen - dumpEndFlag.size(), resultLen - 1);
2282             result += dumpEndStr + " KiB.\n";
2283             allSurfacesMemSize = 0;
2284             return;
2285         }
2286     }
2287 
2288     ss.str("");
2289     ss << memSizeInKB;
2290     std::string str = ss.str();
2291     result.append("\nBufferQueue:\n");
2292     result += "      default-size = [" + std::to_string(defaultWidth_) + "x" + std::to_string(defaultHeight_) + "]" +
2293         ", FIFO = " + std::to_string(bufferQueueSize_) +
2294         ", name = " + name_ +
2295         ", uniqueId = " + std::to_string(uniqueId_) +
2296         ", usedBufferListLen = " + std::to_string(GetUsedSize()) +
2297         ", freeBufferListLen = " + std::to_string(freeList_.size()) +
2298         ", dirtyBufferListLen = " + std::to_string(dirtyList_.size()) +
2299         ", totalBuffersMemSize = " + str + "(KiB)" +
2300         ", hdrWhitePointBrightness = " + std::to_string(hdrWhitePointBrightness_) +
2301         ", sdrWhitePointBrightness = " + std::to_string(sdrWhitePointBrightness_) +
2302         ", lockLastFlushedBuffer seq = " + std::to_string(acquireLastFlushedBufSequence_) + "\n";
2303 
2304     result.append("      bufferQueueCache:\n");
2305     DumpCache(result);
2306 }
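
// Note (illustrative): allSurfacesMemSize above is a function-local static, so it appears to
// accumulate across Dump() calls on different BufferQueue instances within one dump pass; a
// subsequent Dump() call that finds the "dumpend" sentinel at the end of `result` replaces it
// with the accumulated total (in KiB) and resets the counter for the next pass.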
2307 
DumpCurrentFrameLayer()2308 void BufferQueue::DumpCurrentFrameLayer()
2309 {
2310     SURFACE_TRACE_NAME_FMT("BufferQueue::DumpCurrentFrameLayer start dump");
2311     bool dumpStaticFrameEnabled = GetBoolParameter("debug.dumpstaticframe.enabled", "0");
2312     if (!dumpStaticFrameEnabled) {
2313         BLOGI("debug.dumpstaticframe.enabled not exist or not enable!");
2314         return ;
2315     }
2316 
2317     std::lock_guard<std::mutex> lockGuard(mutex_);
2318     uint8_t cnt = 0;
2319     for (auto it = bufferQueueCache_.begin(); it != bufferQueueCache_.end(); it++) {
2320         BufferElement element = it->second;
2321         if (element.state != BUFFER_STATE_ACQUIRED) {
2322             continue;
2323         }
2324         if (element.buffer != nullptr) {
2325             cnt++;
2326             DumpToFileAsync(GetRealPid(), name_, element.buffer);
2327         }
2328     }
2329     BLOGD("BufferQueue::DumpCurrentFrameLayer dump %{public}d buffer", cnt);
2330 }
2331 
GetStatusLocked() const2332 bool BufferQueue::GetStatusLocked() const
2333 {
2334     return isValidStatus_;
2335 }
2336 
GetStatus() const2337 bool BufferQueue::GetStatus() const
2338 {
2339     std::lock_guard<std::mutex> lockGuard(mutex_);
2340     return GetStatusLocked();
2341 }
2342 
SetStatus(bool status)2343 void BufferQueue::SetStatus(bool status)
2344 {
2345     std::lock_guard<std::mutex> lockGuard(mutex_);
2346     isValidStatus_ = status;
2347     waitReqCon_.notify_all();
2348 }
2349 
GetAvailableBufferCount()2350 uint32_t BufferQueue::GetAvailableBufferCount()
2351 {
2352     std::lock_guard<std::mutex> lockGuard(mutex_);
2353     return static_cast<uint32_t>(dirtyList_.size());
2354 }
2355 
SetConnectedPidLocked(int32_t connectedPid)2356 void BufferQueue::SetConnectedPidLocked(int32_t connectedPid)
2357 {
2358     connectedPid_ = connectedPid;
2359 }
2360 
2361 /**
2362  * @brief Optimized FlushBuffer path that runs under a single lock acquisition instead of segmented locking.
2363  */
FlushBufferImprovedLocked(uint32_t sequence,sptr<BufferExtraData> & bedata,const sptr<SyncFence> & fence,const BufferFlushConfigWithDamages & config,std::unique_lock<std::mutex> & lock)2364 GSError BufferQueue::FlushBufferImprovedLocked(uint32_t sequence, sptr<BufferExtraData> &bedata,
2365     const sptr<SyncFence> &fence, const BufferFlushConfigWithDamages &config, std::unique_lock<std::mutex> &lock)
2366 {
2367     if (!GetStatusLocked()) {
2368         SURFACE_TRACE_NAME_FMT("status: %d", GetStatusLocked());
2369         BLOGN_FAILURE_RET(GSERROR_NO_CONSUMER);
2370     }
2371     // check param
2372     auto sret = CheckFlushConfig(config);
2373     if (sret != GSERROR_OK) {
2374         BLOGE("CheckFlushConfig ret: %{public}d, uniqueId: %{public}" PRIu64 ".", sret, uniqueId_);
2375         return sret;
2376     }
2377 
2378     sret = CheckBufferQueueCacheLocked(sequence);
2379     if (sret != GSERROR_OK) {
2380         return sret;
2381     }
2382 
2383     {
2384         std::lock_guard<std::mutex> lockGuard(listenerMutex_);
2385         if (listener_ == nullptr && listenerClazz_ == nullptr) {
2386             BLOGE("listener is nullptr, uniqueId: %{public}" PRIu64 ".", uniqueId_);
2387             return SURFACE_ERROR_CONSUMER_UNREGISTER_LISTENER;
2388         }
2389     }
2390     sret = DoFlushBufferLocked(sequence, bedata, fence, config, lock);
2391     if (sret != GSERROR_OK) {
2392         return sret;
2393     }
2394     return sret;
2395 }
2396 
RequestAndDetachBuffer(const BufferRequestConfig & config,sptr<BufferExtraData> & bedata,struct IBufferProducer::RequestBufferReturnValue & retval)2397 GSError BufferQueue::RequestAndDetachBuffer(const BufferRequestConfig& config, sptr<BufferExtraData>& bedata,
2398     struct IBufferProducer::RequestBufferReturnValue& retval)
2399 {
2400     SURFACE_TRACE_NAME_FMT("RequestAndDetachBuffer queueId: %" PRIu64, uniqueId_);
2401     std::unique_lock<std::mutex> lock(mutex_);
2402     auto ret = RequestBufferLocked(config, bedata, retval, lock);
2403     if (ret != GSERROR_OK) {
2404         return ret;
2405     }
2406     return DetachBufferFromQueueLocked(retval.sequence, InvokerType::PRODUCER_INVOKER, lock, false);
2407 }
2408 
AttachAndFlushBuffer(sptr<SurfaceBuffer> & buffer,sptr<BufferExtraData> & bedata,const sptr<SyncFence> & fence,BufferFlushConfigWithDamages & config,bool needMap)2409 GSError BufferQueue::AttachAndFlushBuffer(sptr<SurfaceBuffer>& buffer, sptr<BufferExtraData>& bedata,
2410     const sptr<SyncFence>& fence, BufferFlushConfigWithDamages& config, bool needMap)
2411 {
2412     SURFACE_TRACE_NAME_FMT("AttachAndFlushBuffer queueId: %" PRIu64 " sequence: %u", uniqueId_, buffer->GetSeqNum());
2413     GSError ret;
2414     {
2415         std::unique_lock<std::mutex> lock(mutex_);
2416         ret = AttachBufferToQueueLocked(buffer, InvokerType::PRODUCER_INVOKER, needMap);
2417         if (ret != GSERROR_OK) {
2418             return ret;
2419         }
2420         uint32_t sequence = buffer->GetSeqNum();
2421         ret = FlushBufferImprovedLocked(sequence, bedata, fence, config, lock);
2422         if (ret != GSERROR_OK) {
2423             for (auto it = dirtyList_.begin(); it != dirtyList_.end(); it++) {
2424                 if (*it == sequence) {
2425                     dirtyList_.erase(it);
2426                     break;
2427                 }
2428             }
2429             bufferQueueCache_.erase(sequence);
2430             lppBufferCache_.erase(sequence);
2431             return ret;
2432         }
2433     }
2434     CallConsumerListener();
2435     return ret;
2436 }
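
// Illustrative sketch (not part of the original file): the detached-producer fast path.
// RequestAndDetachBuffer() hands out a buffer that is removed from the cache, and
// AttachAndFlushBuffer() later re-attaches and queues it for the consumer in one locked
// pass. queue, requestCfg, flushCfg and ExampleDetachedProducerCycle are hypothetical; in
// real producer code extraData would be a concrete BufferExtraData implementation.
static GSError ExampleDetachedProducerCycle(sptr<BufferQueue> &queue,
    const BufferRequestConfig &requestCfg, BufferFlushConfigWithDamages &flushCfg,
    sptr<BufferExtraData> &extraData)
{
    IBufferProducer::RequestBufferReturnValue retval;
    GSError err = queue->RequestAndDetachBuffer(requestCfg, extraData, retval);
    if (err != GSERROR_OK) {
        return err;
    }
    // ... render into retval.buffer here, outside of any queue bookkeeping ...
    return queue->AttachAndFlushBuffer(retval.buffer, extraData, SyncFence::InvalidFence(), flushCfg, true);
}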
2437 
GetLastFlushedDesiredPresentTimeStamp(int64_t & lastFlushedDesiredPresentTimeStamp)2438 GSError BufferQueue::GetLastFlushedDesiredPresentTimeStamp(int64_t &lastFlushedDesiredPresentTimeStamp)
2439 {
2440     std::lock_guard<std::mutex> lockGuard(mutex_);
2441     lastFlushedDesiredPresentTimeStamp = lastFlushedDesiredPresentTimeStamp_;
2442     return GSERROR_OK;
2443 }
2444 
GetFrontDesiredPresentTimeStamp(int64_t & desiredPresentTimeStamp,bool & isAutoTimeStamp)2445 GSError BufferQueue::GetFrontDesiredPresentTimeStamp(int64_t &desiredPresentTimeStamp, bool &isAutoTimeStamp)
2446 {
2447     std::lock_guard<std::mutex> lockGuard(mutex_);
2448     std::list<uint32_t>::iterator frontSequence = dirtyList_.begin();
2449     if (frontSequence == dirtyList_.end()) {
2450         return GSERROR_NO_BUFFER;
2451     }
2452     auto iter = bufferQueueCache_.find(*frontSequence);
2453     if (iter == bufferQueueCache_.end()) {
2454         return GSERROR_NO_BUFFER;
2455     }
2456     desiredPresentTimeStamp = (iter->second).desiredPresentTimestamp;
2457     isAutoTimeStamp = (iter->second).isAutoTimestamp;
2458     return GSERROR_OK;
2459 }
2460 
GetBufferSupportFastCompose(bool & bufferSupportFastCompose)2461 GSError BufferQueue::GetBufferSupportFastCompose(bool &bufferSupportFastCompose)
2462 {
2463     std::lock_guard<std::mutex> lockGuard(mutex_);
2464     bufferSupportFastCompose = bufferSupportFastCompose_;
2465     return GSERROR_OK;
2466 }
2467 
GetBufferCacheConfig(const sptr<SurfaceBuffer> & buffer,BufferRequestConfig & config)2468 GSError BufferQueue::GetBufferCacheConfig(const sptr<SurfaceBuffer>& buffer, BufferRequestConfig& config)
2469 {
2470     std::lock_guard<std::mutex> lockGuard(mutex_);
2471     auto iter = bufferQueueCache_.find(buffer->GetSeqNum());
2472     if (iter == bufferQueueCache_.end()) {
2473         return GSERROR_BUFFER_NOT_INCACHE;
2474     }
2475     config = iter->second.config;
2476     return GSERROR_OK;
2477 }
2478 
GSError BufferQueue::GetCycleBuffersNumber(uint32_t& cycleBuffersNumber)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    return BufferUtilGetCycleBuffersNumber(cycleBuffersNumber, rotatingBufferNumber_, bufferQueueSize_);
}

GSError BufferQueue::SetCycleBuffersNumber(uint32_t cycleBuffersNumber)
{
    // 2: cycleBuffersNumber may be at most twice the max queue size
    if (cycleBuffersNumber == 0 || cycleBuffersNumber > SURFACE_MAX_QUEUE_SIZE * 2) {
        BLOGE("Set rotating buffers number: %{public}u failed", cycleBuffersNumber);
        return GSERROR_INVALID_ARGUMENTS;
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    rotatingBufferNumber_ = cycleBuffersNumber;
    return GSERROR_OK;
}

GSError BufferQueue::GetFrameGravity(int32_t &frameGravity)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    frameGravity = frameGravity_;
    return GSERROR_OK;
}

GSError BufferQueue::SetFrameGravity(int32_t frameGravity)
{
    if (frameGravity < MIN_FRAME_GRAVITY || frameGravity > MAX_FRAME_GRAVITY) {
        BLOGE("Set frame gravity: %{public}d failed", frameGravity);
        return GSERROR_INVALID_ARGUMENTS;
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    frameGravity_ = frameGravity;
    return GSERROR_OK;
}

GSError BufferQueue::GetFixedRotation(int32_t &fixedRotation)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    fixedRotation = fixedRotation_;
    return GSERROR_OK;
}

GSError BufferQueue::SetFixedRotation(int32_t fixedRotation)
{
    if (fixedRotation < MIN_FIXED_ROTATION || fixedRotation > MAX_FIXED_ROTATION) {
        BLOGE("Set fixed rotation: %{public}d failed", fixedRotation);
        return GSERROR_INVALID_ARGUMENTS;
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    fixedRotation_ = fixedRotation;
    return GSERROR_OK;
}

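// Allocates allocBufferCount buffers for the given config without holding mutex_,
// collecting the successfully allocated buffers into surfaceBufferCache keyed by
// sequence number; failed allocations are logged and skipped.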
void BufferQueue::AllocBuffers(const BufferRequestConfig &config, uint32_t allocBufferCount,
    std::map<uint32_t, sptr<SurfaceBuffer>> &surfaceBufferCache)
{
    SURFACE_TRACE_NAME_FMT("AllocBuffers allocBufferCount %u width %d height %d format %d usage %" PRIu64,
        allocBufferCount, config.width, config.height, config.format, config.usage);
    for (uint32_t i = 0; i < allocBufferCount; i++) {
        sptr<SurfaceBuffer> bufferImpl = new SurfaceBufferImpl();
        uint32_t sequence = bufferImpl->GetSeqNum();

        GSError ret = bufferImpl->Alloc(config);
        if (ret != GSERROR_OK) {
            BLOGE("Alloc failed, sequence:%{public}u, ret:%{public}d, uniqueId: %{public}" PRIu64 ".",
                sequence, ret, uniqueId_);
            continue;
        }
        surfaceBufferCache[sequence] = bufferImpl;
    }
}

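// Pre-allocates buffers and parks them in the free list so later RequestBuffer
// calls can be served without allocating. The requested count is clamped to the
// free slots remaining (queue size minus reserved detach slots minus buffers
// already cached); the allocation itself happens outside the lock.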
GSError BufferQueue::PreAllocBuffers(const BufferRequestConfig &config, uint32_t allocBufferCount)
{
    SURFACE_TRACE_NAME_FMT("PreAllocBuffers bufferQueueSize %u cacheSize %zu allocBufferCount %u",
        bufferQueueSize_, bufferQueueCache_.size(), allocBufferCount);
    if (config.width <= 0 || config.height <= 0 || config.format < GraphicPixelFormat::GRAPHIC_PIXEL_FMT_CLUT8 ||
        config.format >= GraphicPixelFormat::GRAPHIC_PIXEL_FMT_BUTT || allocBufferCount == 0) {
        return GSERROR_INVALID_ARGUMENTS;
    }
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (allocBufferCount > bufferQueueSize_ - detachReserveSlotNum_ - bufferQueueCache_.size()) {
            allocBufferCount = bufferQueueSize_ - detachReserveSlotNum_ - bufferQueueCache_.size();
        }
    }
    if (allocBufferCount == 0) {
        return SURFACE_ERROR_BUFFER_QUEUE_FULL;
    }

    std::map<uint32_t, sptr<SurfaceBuffer>> surfaceBufferCache;
    AllocBuffers(config, allocBufferCount, surfaceBufferCache);
    {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        for (auto iter = surfaceBufferCache.begin(); iter != surfaceBufferCache.end(); ++iter) {
            if (bufferQueueCache_.size() >= bufferQueueSize_ - detachReserveSlotNum_) {
                BLOGW("CacheSize: %{public}zu, QueueSize: %{public}u, allocBufferCount: %{public}zu,"
                    " queueId: %{public}" PRIu64, bufferQueueCache_.size(), bufferQueueSize_,
                    surfaceBufferCache.size(), uniqueId_);
                return SURFACE_ERROR_OUT_OF_RANGE;
            }
            BufferElement ele = {
                .buffer = iter->second,
                .state = BUFFER_STATE_REQUESTED,
                .isDeleting = false,
                .config = config,
                .fence = SyncFence::InvalidFence(),
                .isPreAllocBuffer = true,
            };
            bufferQueueCache_[iter->first] = ele;
            freeList_.push_back(iter->first);
        }
    }
    return GSERROR_OK;
}

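// Tags the buffer's dma-buf with the type string "last_buffer" via the
// DMA_BUF_SET_TYPE ioctl, presumably so the kernel memory manager can single it
// out for reclaim; the ioctl result is only logged, so failures are non-fatal.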
void BufferQueue::MarkBufferReclaimableByIdLocked(uint32_t sequence)
{
    auto it = bufferQueueCache_.find(sequence);
    if (it != bufferQueueCache_.end()) {
        auto buffer = it->second.buffer;
        if (buffer != nullptr) {
            SURFACE_TRACE_NAME_FMT("MarkBufferReclaimableByIdLocked name: %s, queueId: %" PRIu64 " fd: %d size: %u",
                name_.c_str(), uniqueId_, buffer->GetFileDescriptor(), buffer->GetSize());
            int32_t ret = ioctl(buffer->GetFileDescriptor(), DMA_BUF_SET_TYPE, "last_buffer");
            BLOGI("MarkBufferReclaimable fd=%{public}d, type=last_buffer, ret=%{public}d",
                buffer->GetFileDescriptor(), ret);
        }
    }
}

GSError BufferQueue::GetLastConsumeTime(int64_t &lastConsumeTime)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    lastConsumeTime = lastConsumeTime_;
    return GSERROR_OK;
}

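// Caps the queue size at queueSize; if the current bufferQueueSize_ already
// exceeds the new cap, the queue is shrunk immediately via SetQueueSizeLocked.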
GSError BufferQueue::SetMaxQueueSize(uint32_t queueSize)
{
    if (queueSize == 0 || queueSize > SURFACE_MAX_QUEUE_SIZE) {
        BLOGW("invalid queueSize: %{public}u, uniqueId: %{public}" PRIu64 ".",
            queueSize, uniqueId_);
        return GSERROR_INVALID_ARGUMENTS;
    }
    std::unique_lock<std::mutex> lock(mutex_);
    maxQueueSize_ = queueSize;
    if (bufferQueueSize_ > maxQueueSize_) {
        return SetQueueSizeLocked(maxQueueSize_, lock);
    }
    return GSERROR_OK;
}

GSError BufferQueue::GetMaxQueueSize(uint32_t &queueSize) const
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    queueSize = maxQueueSize_;
    return GSERROR_OK;
}

GSError BufferQueue::SetIsActiveGame(bool isActiveGame)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    isActiveGame_ = isActiveGame;
    return GSERROR_OK;
}

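// Acquires the most recently produced low-power-playback (LPP) frame from the
// shared-memory slot ring instead of the regular dirty list. The slot to read
// is the one just behind the producer's write offset:
//     readOffset = (writeOffset + LPP_SLOT_SIZE - 1) % LPP_SLOT_SIZE
// e.g. with LPP_SLOT_SIZE == 8 and writeOffset == 0 the newest slot is index 7.
// If the producer has not advanced since the last acquire (writeOffset unchanged
// and readOffset already at the newest slot), GSERROR_NO_BUFFER is returned and
// lppSkipCount_ is incremented.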
GSError BufferQueue::AcquireLppBuffer(
    sptr<SurfaceBuffer> &buffer, sptr<SyncFence> &fence, int64_t &timestamp, std::vector<Rect> &damages)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    if (sourceType_ != OHSurfaceSource::OH_SURFACE_SOURCE_LOWPOWERVIDEO || lppSlotInfo_ == nullptr) {
        BLOGD("AcquireLppBuffer source is not Lpp");
        return GSERROR_TYPE_ERROR;
    }
    SURFACE_TRACE_NAME_FMT("AcquireLppBuffer name: %s queueId: %" PRIu64, name_.c_str(), uniqueId_);
    const auto slotInfo = *lppSlotInfo_;
    int32_t readOffset = -1;
    if (slotInfo.readOffset < 0 || slotInfo.readOffset >= LPP_SLOT_SIZE || slotInfo.writeOffset < 0 ||
        slotInfo.writeOffset >= LPP_SLOT_SIZE) {
        BLOGW("AcquireLppBuffer slotInfo parameter validation failed");
        return GSERROR_INVALID_ARGUMENTS;
    }
    int32_t maxWriteOffset = (slotInfo.writeOffset + LPP_SLOT_SIZE - 1) % LPP_SLOT_SIZE;
    if (slotInfo.writeOffset == lastLppWriteOffset_ && slotInfo.readOffset == maxWriteOffset) {
        lppSkipCount_++;
        return GSERROR_NO_BUFFER;
    }
    readOffset = maxWriteOffset;
    lppSkipCount_ = 0;
    const auto bufferSlot = slotInfo.slot[readOffset];
    lppSlotInfo_->readOffset = readOffset;
    lastLppWriteOffset_ = slotInfo.writeOffset;
    uint32_t seqId = bufferSlot.seqId;

    auto bufferPtr = lppBufferCache_.find(seqId);
    if (bufferPtr == lppBufferCache_.end()) {
        SURFACE_TRACE_NAME_FMT("AcquireLppBuffer buffer not found in cache, seqId = [%u]", seqId);
        return GSERROR_NO_BUFFER;
    }
    buffer = bufferPtr->second;
    fence = SyncFence::INVALID_FENCE;
    damages = {{
        .x = bufferSlot.damage[0],
        .y = bufferSlot.damage[1],
        .w = bufferSlot.damage[2],
        .h = bufferSlot.damage[3],
    }};
    buffer->SetSurfaceBufferTransform(transform_);
    timestamp = bufferSlot.timestamp;
    return GSERROR_OK;
}

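// Maps (state == true) or unmaps (state == false) the shared memory region that
// backs the LPP slot ring. Before removing the mapping, any pending LPP frame is
// drained into the regular queue via FlushLppBuffer so it is not lost.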
GSError BufferQueue::SetLppShareFd(int fd, bool state)
{
    if (state) {
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (sourceType_ != OHSurfaceSource::OH_SURFACE_SOURCE_LOWPOWERVIDEO) {
            BLOGW("SetLppShareFd source is not Lpp");
            return GSERROR_TYPE_ERROR;
        }
        if (lppSlotInfo_ != nullptr) {
            munmap(static_cast<void *>(lppSlotInfo_), LPP_SHARED_MEM_SIZE);
            lppSlotInfo_ = nullptr;
        }
        void *lppPtr = mmap(nullptr, LPP_SHARED_MEM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (lppPtr == nullptr || lppPtr == MAP_FAILED) {
            BLOGW("SetLppShareFd set fd, mmap failed");
            return GSERROR_INVALID_ARGUMENTS;
        }
        lppSlotInfo_ = static_cast<LppSlotInfo *>(lppPtr);
        BLOGI("SetLppShareFd set fd success");
    } else {
        FlushLppBuffer();
        std::lock_guard<std::mutex> lockGuard(mutex_);
        if (lppSlotInfo_ != nullptr) {
            munmap(static_cast<void *>(lppSlotInfo_), LPP_SHARED_MEM_SIZE);
            lppSlotInfo_ = nullptr;
            BLOGI("SetLppShareFd remove fd success");
        }
    }
    return GSERROR_OK;
}

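// Drains the newest pending LPP frame, if any, into the normal flush path so it
// is queued as an ordinary dirty buffer.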
void BufferQueue::FlushLppBuffer()
{
    sptr<SurfaceBuffer> buffer = nullptr;
    sptr<SyncFence> acquireFence = SyncFence::InvalidFence();
    int64_t timestamp = 0;
    std::vector<Rect> damages;
    AcquireLppBuffer(buffer, acquireFence, timestamp, damages);
    if (buffer == nullptr) {
        return;
    }
    BufferFlushConfigWithDamages cfg{
        .damages = damages,
        .timestamp = timestamp,
        .desiredPresentTimestamp = -1,
    };
    FlushBuffer(buffer->GetSeqNum(), buffer->GetExtraData(), acquireFence, cfg);
}

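// Switches which side draws the LPP surface: isShbSource toggles the SHB draw
// path via isStopShbDraw, and isRsDrawLpp_ records whether the render service
// consumes it. The switch is refused with GSERROR_OUT_OF_RANGE once the producer
// has stalled for MAX_LPP_SKIP_COUNT consecutive empty acquires.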
GSError BufferQueue::SetLppDrawSource(bool isShbSource, bool isRsSource)
{
    std::unique_lock<std::mutex> lock(mutex_);
    SURFACE_TRACE_NAME_FMT("SetLppDrawSource sourceType: [%d], lppSlotInfo: [%d], lppSkipCount: [%d],"
        " isShbSource: [%d], isRsSource: [%d]", sourceType_, (lppSlotInfo_ == nullptr), lppSkipCount_,
        isShbSource, isRsSource);
    if (sourceType_ != OHSurfaceSource::OH_SURFACE_SOURCE_LOWPOWERVIDEO || lppSlotInfo_ == nullptr) {
        isRsDrawLpp_ = false;
        return GSERROR_TYPE_ERROR;
    }
    if (lppSkipCount_ >= MAX_LPP_SKIP_COUNT) {
        return GSERROR_OUT_OF_RANGE;
    }
    lppSlotInfo_->isStopShbDraw = !isShbSource;
    isRsDrawLpp_ = isRsSource;
    return GSERROR_OK;
}

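// Validates that the alpha type lies within the GraphicAlphaType enum range
// (GRAPHIC_ALPHATYPE_UNKNOWN .. GRAPHIC_ALPHATYPE_UNPREMUL) before storing it.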
GSError BufferQueue::SetAlphaType(GraphicAlphaType alphaType)
{
    if (alphaType < GraphicAlphaType::GRAPHIC_ALPHATYPE_UNKNOWN ||
        alphaType > GraphicAlphaType::GRAPHIC_ALPHATYPE_UNPREMUL) {
        BLOGE("Set alpha Type: %{public}d failed", alphaType);
        return GSERROR_INVALID_ARGUMENTS;
    }
    std::lock_guard<std::mutex> lockGuard(mutex_);
    alphaType_ = alphaType;
    return GSERROR_OK;
}

GSError BufferQueue::GetAlphaType(GraphicAlphaType &alphaType)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    alphaType = alphaType_;
    return GSERROR_OK;
}

GSError BufferQueue::SetIsPriorityAlloc(bool isPriorityAlloc)
{
    std::lock_guard<std::mutex> lockGuard(mutex_);
    isPriorityAlloc_ = isPriorityAlloc;
    return GSERROR_OK;
}
}; // namespace OHOS